// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/prandom.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/hex.h>
#include <linux/objtool.h>
#include <linux/overflow.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/nospec.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <linux/execmem.h>
#include <crypto/sha2.h>

#include <asm/barrier.h>
#include <linux/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define OFF	insn->off
#define IMM	insn->imm

struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
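/* An illustrative sketch (not part of the original code): a cBPF-style load
 * with a negative offset addresses one of the virtual extension ranges.
 * Assuming the usual uapi constants, k = SKF_NET_OFF + 2 asks for the third
 * byte of the network header, i.e. roughly:
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 2, 1);
 *	if (p)
 *		b = *p;
 *
 * A NULL return means the requested bytes are not in the linear skb area.
 */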
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF) {
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	} else if (k >= SKF_LL_OFF) {
		if (unlikely(!skb_mac_header_was_set(skb)))
			return NULL;
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	}
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

/* tell bpf programs that include vmlinux.h kernel's PAGE_SIZE */
enum page_size_enum {
	__PAGE_SIZE = PAGE_SIZE
};

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, __PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL)
		return NULL;

	aux = kzalloc_obj(*aux, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}
	fp->active = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 4,
					bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (!fp->active) {
		vfree(fp);
		kfree(aux);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->main_prog_aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();
	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
#ifdef CONFIG_CGROUP_BPF
	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
#endif

	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
#ifdef CONFIG_FINEIBT
	INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
#endif
	mutex_init(&fp->aux->used_maps_mutex);
	mutex_init(&fp->aux->ext_mutex);
	mutex_init(&fp->aux->dst_mutex);
	mutex_init(&fp->aux->st_ops_assoc_mutex);

#ifdef CONFIG_BPF_SYSCALL
	bpf_prog_stream_init(fp);
#endif

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->stats) {
		free_percpu(prog->active);
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kvzalloc_objs(*prog->aux->jited_linfo,
					       prog->aux->nr_linfo,
					       bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo &&
	    (!prog->jited || !prog->aux->jited_linfo[0])) {
		kvfree(prog->aux->jited_linfo);
		prog->aux->jited_linfo = NULL;
	}

	kfree(prog->aux->kfunc_tab);
	prog->aux->kfunc_tab = NULL;
}

/* The jit engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off. Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *	The first bpf insn off of the prog. The insn off
 *	here is relative to the main prog.
 *	e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *	The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
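/* A small worked example of the index math above (numbers made up): for a
 * subprog with insn_start = 10 and a line info entry at
 * linfo[i].insn_off = 13, the jited address of that line is
 *
 *	jited_linfo[i] = prog->bpf_func + insn_to_jit_off[13 - 10 - 1]
 *	               = prog->bpf_func + insn_to_jit_off[2]
 *
 * i.e. the end offset of the jited code for xlated insn 2 of this prog.
 */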
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *fp;
	u32 pages;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	fp = __vmalloc(size, gfp_flags);
	if (fp) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		fp_old->stats = NULL;
		fp_old->active = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		mutex_destroy(&fp->aux->used_maps_mutex);
		mutex_destroy(&fp->aux->dst_mutex);
		mutex_destroy(&fp->aux->st_ops_assoc_mutex);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	free_percpu(fp->stats);
	free_percpu(fp->active);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	size_t size = bpf_prog_insn_size(fp);
	struct bpf_insn *dst;
	bool was_ld_map;
	u32 i;

	dst = vmalloc(size);
	if (!dst)
		return -ENOMEM;

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}
	sha256((u8 *)dst, size, fp->digest);
	vfree(dst);
	return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}
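
/* Illustrative only (numbers made up): with a patch at pos = 10 that grows
 * one insn into three, end_old = 11 and end_new = 13, so delta = 2. A
 * pseudo call at curr = 5 with imm = 7 originally targeted insn 13
 * (curr + imm + 1), which lies beyond end_old, so it is rewritten to
 * imm = 9 to keep pointing at the same, now shifted, instruction.
 */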

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	s64 off_min, off_max, off;
	s32 delta = end_new - end_old;

	if (insn->code == (BPF_JMP32 | BPF_JA)) {
		off = insn->imm;
		off_min = S32_MIN;
		off_max = S32_MAX;
	} else {
		off = insn->off;
		off_min = S16_MIN;
		off_max = S16_MAX;
	}

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass) {
		if (insn->code == (BPF_JMP32 | BPF_JA))
			insn->imm = off;
		else
			insn->off = off;
	}
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		if (bpf_pseudo_func(insn)) {
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
			if (ret)
				return ret;
			continue;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
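	/* For example (made-up numbers): patching insn 5 with a 3-insn
	 * patchlet gives insn_delta = 2. Step 1 moves insns 6..len-1 up by
	 * two slots, step 2 copies the three patch insns over slots 5..7,
	 * and step 3 fixes any jump that crosses the 5..7 boundary.
	 */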
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed and we cannot revert to the original
	 * state. An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	int err;

	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	err = bpf_adj_branches(prog, off, off + cnt, off, false);
	WARN_ON_ONCE(err);
	return err;
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->real_func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
	char *sym = prog->aux->ksym.name;
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

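	/* The resulting symbol has the form bpf_prog_<tag>[_<name>], where
	 * <tag> is the program tag rendered as hex by bin2hex() below, e.g.
	 * (hypothetical tag and name) "bpf_prog_8937c9e119d281a9_my_prog".
	 */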
	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;

	ksym = container_of(n, struct bpf_ksym, tnode);

	if (val < ksym->start)
		return -1;
	/* Ensure that we detect return addresses as part of the program, when
	 * the final instruction is a call for a program part of the stack
	 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
	 */
	if (val > ksym->end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	WARN_ON_ONCE(!list_empty(&ksym->lnode));
	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
	if (list_empty(&ksym->lnode))
		return;

	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	__bpf_ksym_del(ksym);
	spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !bpf_token_capable(fp->aux->token, CAP_BPF))
		return;

	bpf_prog_ksym_set_addr(fp);
	bpf_prog_ksym_set_name(fp);
	fp->aux->ksym.prog = true;

	bpf_ksym_add(&fp->aux->ksym);

#ifdef CONFIG_FINEIBT
	/*
	 * When FineIBT is enabled, code in the __cfi_foo() symbols can get
	 * executed and hence the unwinder needs help.
	 */
	if (cfi_mode != CFI_FINEIBT)
		return;

	snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
		 "__cfi_%s", fp->aux->ksym.name);

	fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
	fp->aux->ksym_prefix.end   = (unsigned long) fp->bpf_func;

	bpf_ksym_add(&fp->aux->ksym_prefix);
#endif
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	bpf_ksym_del(&fp->aux->ksym);
#ifdef CONFIG_FINEIBT
	if (cfi_mode != CFI_FINEIBT)
		return;
	bpf_ksym_del(&fp->aux->ksym_prefix);
#endif
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

int bpf_address_lookup(unsigned long addr, unsigned long *size,
		       unsigned long *off, char *sym)
{
	struct bpf_ksym *ksym;
	int ret = 0;

	rcu_read_lock();
	ksym = bpf_ksym_find(addr);
	if (ksym) {
		unsigned long symbol_start = ksym->start;
		unsigned long symbol_end = ksym->end;

		ret = strscpy(sym, ksym->name, KSYM_NAME_LEN);

		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_ksym_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	struct bpf_ksym *ksym;

	WARN_ON_ONCE(!rcu_read_lock_held());
	ksym = bpf_ksym_find(addr);

	return ksym && ksym->prog ?
	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
	       NULL;
}

bool bpf_has_frame_pointer(unsigned long ip)
{
	struct bpf_ksym *ksym;
	unsigned long offset;

	guard(rcu)();

	ksym = bpf_ksym_find(ip);
	if (!ksym || !ksym->fp_start || !ksym->fp_end)
		return false;

	offset = ip - ksym->start;

	return offset >= ksym->fp_start && offset < ksym->fp_end;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_ksym_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_ksym *ksym;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
		if (it++ != symnum)
			continue;

		strscpy(sym, ksym->name, KSYM_NAME_LEN);

		*value = ksym->start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc_array(tab, size, sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}


/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small bpf programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
 * on x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT	6
#define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))

struct bpf_prog_pack {
	struct list_head list;
	void *ptr;
	unsigned long bitmap[];
};

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{
	memset(area, 0, size);
}

#define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
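
/* Worked example of the chunk math above (illustrative numbers): a JITed
 * image of 200 bytes rounds up to 256 bytes, i.e. BPF_PROG_SIZE_TO_NBITS()
 * returns 4, and the allocation occupies four consecutive 64-byte chunks
 * (four bits) in a pack's bitmap.
 */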

static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
 */
#ifdef PMD_SIZE
/* PMD_SIZE is really big for some archs. It doesn't make sense to
 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
 * greater than or equal to 2MB.
 */
#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE PAGE_SIZE
#endif

#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)

static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_prog_pack *pack;
	int err;

	pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT));
	if (!pack)
		return NULL;
	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
	if (!pack->ptr)
		goto out;
	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);

	set_vm_flush_reset_perms(pack->ptr);
	err = set_memory_rox((unsigned long)pack->ptr,
			     BPF_PROG_PACK_SIZE / PAGE_SIZE);
	if (err)
		goto out;
	list_add_tail(&pack->list, &pack_list);
	return pack;

out:
	bpf_jit_free_exec(pack->ptr);
	kfree(pack);
	return NULL;
}

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
	struct bpf_prog_pack *pack;
	unsigned long pos;
	void *ptr = NULL;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		size = round_up(size, PAGE_SIZE);
		ptr = bpf_jit_alloc_exec(size);
		if (ptr) {
			int err;

			bpf_fill_ill_insns(ptr, size);
			set_vm_flush_reset_perms(ptr);
			err = set_memory_rox((unsigned long)ptr,
					     size / PAGE_SIZE);
			if (err) {
				bpf_jit_free_exec(ptr);
				ptr = NULL;
			}
		}
		goto out;
	}
	list_for_each_entry(pack, &pack_list, list) {
		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
						 nbits, 0);
		if (pos < BPF_PROG_CHUNK_COUNT)
			goto found_free_area;
	}

	pack = alloc_new_pack(bpf_fill_ill_insns);
	if (!pack)
		goto out;

	pos = 0;

found_free_area:
	bitmap_set(pack->bitmap, pos, nbits);
	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);

out:
	mutex_unlock(&pack_mutex);
	return ptr;
}

void bpf_prog_pack_free(void *ptr, u32 size)
{
	struct bpf_prog_pack *pack = NULL, *tmp;
	unsigned int nbits;
	unsigned long pos;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		bpf_jit_free_exec(ptr);
		goto out;
	}

	list_for_each_entry(tmp, &pack_list, list) {
		if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
			pack = tmp;
			break;
		}
	}

	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
		goto out;

	nbits = BPF_PROG_SIZE_TO_NBITS(size);
	pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;

	WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
		  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");

	bitmap_clear(pack->bitmap, pos, nbits);
	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
		list_del(&pack->list);
		bpf_jit_free_exec(pack->ptr);
		kfree(pack);
	}
out:
	mutex_unlock(&pack_mutex);
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
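
/* Illustrative arithmetic (assuming a hypothetical 1 GiB exec area): the
 * default bpf_jit_limit comes out as half of bpf_jit_limit_max rounded up
 * to a page, i.e. 512 MiB, and unprivileged JIT allocations beyond that
 * are refused by bpf_jit_charge_modmem() below.
 */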

int bpf_jit_charge_modmem(u32 size)
{
	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
		if (!bpf_capable()) {
			atomic_long_sub(size, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

void bpf_jit_uncharge_modmem(u32 size)
{
	atomic_long_sub(size, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return execmem_alloc(EXECMEM_BPF, size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	execmem_free(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);

	if (bpf_jit_charge_modmem(size))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->size = size;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = get_random_u32_below(hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 size = hdr->size;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(size);
}

/* Allocate jit binary from bpf_prog_pack allocator.
 * Since the allocated memory is RO+X, the JIT engine cannot write directly
 * to the memory. To solve this problem, a RW buffer is also allocated at
 * the same time. The JIT engine should calculate offsets based on the
 * RO memory address, but write JITed program to the RW buffer. Once the
 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
 * the JITed program to the RO memory.
 */
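/* A rough sketch of the intended call sequence from an arch JIT (the local
 * variable names here are made up, the APIs are the ones below):
 *
 *	ro_hdr = bpf_jit_binary_pack_alloc(proglen, &ro_image, align,
 *					   &rw_hdr, &rw_image, fill_insns);
 *	... emit instructions into rw_image, computing any PC-relative
 *	... offsets against the ro_image addresses
 *	err = bpf_jit_binary_pack_finalize(ro_hdr, rw_hdr);
 *
 * On JIT failure before finalize, bpf_jit_binary_pack_free() releases both
 * the RO region and the RW buffer.
 */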
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_header,
			  u8 **rw_image,
			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *ro_header;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* add 16 bytes for a random section of illegal instructions */
	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);

	if (bpf_jit_charge_modmem(size))
		return NULL;
	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
	if (!ro_header) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	*rw_header = kvmalloc(size, GFP_KERNEL);
	if (!*rw_header) {
		bpf_prog_pack_free(ro_header, size);
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(*rw_header, size);
	(*rw_header)->size = size;

	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
	start = get_random_u32_below(hole) & ~(alignment - 1);

	*image_ptr = &ro_header->image[start];
	*rw_image = &(*rw_header)->image[start];

	return ro_header;
}

/* Copy JITed text from rw_header to its final location, the ro_header. */
int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header)
{
	void *ptr;

	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);

	kvfree(rw_header);

	if (IS_ERR(ptr)) {
		bpf_prog_pack_free(ro_header, ro_header->size);
		return PTR_ERR(ptr);
	}
	return 0;
}

/* bpf_jit_binary_pack_free is called in two different scenarios:
 *   1) when the program is freed after the JIT completed normally;
 *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
 * For case 2), we need to free both the RO memory and the RW buffer.
 *
 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
 * bpf_arch_text_copy (when jit fails).
 */
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header)
{
	u32 size = ro_header->size;

	bpf_prog_pack_free(ro_header, size);
	kvfree(rw_header);
	bpf_jit_uncharge_modmem(size);
}

struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr;

	addr = real_start & BPF_PROG_CHUNK_MASK;
	return (void *)addr;
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr;

	addr = real_start & PAGE_MASK;
	return (void *)addr;
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;
	int err;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->real_func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
		   bpf_jit_supports_far_kfunc_call()) {
		err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
		if (err)
			return err;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

const char *bpf_jit_get_prog_name(struct bpf_prog *prog)
{
	if (prog->aux->ksym.prog)
		return prog->aux->ksym.name;
	return prog->aux->name;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_u32();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
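	/* To illustrate the rewrite pattern used below (imm values made
	 * up): with imm_rnd = 0x11111111, the single insn
	 *
	 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
	 *
	 * is emitted as the equivalent but constant-free sequence
	 *
	 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x11111111 ^ 0x1234)
	 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x11111111)
	 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
	 *
	 * so an attacker-chosen immediate never appears verbatim in the
	 * JITed image.
	 */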
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;

	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
	case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
	case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^
				      from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		/*
		 * Cannot use BPF_STX_MEM() macro here as it
		 * hardcodes BPF_MEM mode, losing PROBE_MEM32
		 * and breaking arena addressing in the JIT.
		 */
		*to++ = (struct bpf_insn) {
			.code = BPF_STX | BPF_PROBE_MEM32 |
				BPF_SIZE(from->code),
			.dst_reg = from->dst_reg,
			.src_reg = BPF_REG_AX,
			.off = from->off,
		};
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	fp->stats = NULL;
	fp->active = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	if (fp->aux->offload)
		fp->aux->offload->prog = fp;
	bpf_prog_clone_free(fp_other);
}

/*
 * Now this function is used only to blind the main prog and must be invoked only when
 * bpf_prog_need_blind() returns true.
 */
struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (WARN_ON_ONCE(env && env->prog != prog))
		return ERR_PTR(-EINVAL);

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	/* make sure bpf_patch_insn_data() patches the correct prog */
	if (env)
		env->prog = clone;

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			/* ld_imm64 with an address of bpf subprog is not
			 * a user controlled constant. Don't randomize it,
			 * since it will conflict with jit_subprogs() logic.
			 */
			insn++;
			i++;
			continue;
		}

		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
					       clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		if (env)
			tmp = bpf_patch_insn_data(env, i, insn_buff, rewritten);
		else
			tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);

		if (IS_ERR_OR_NULL(tmp)) {
			if (env)
				/* restore the original prog */
				env->prog = prog;
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return IS_ERR(tmp) ? tmp : ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		if (env)
			env->prog = clone;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}

bool bpf_insn_is_indirect_target(const struct bpf_verifier_env *env, const struct bpf_prog *prog,
				 int insn_idx)
{
	if (!env)
		return false;
	insn_idx += prog->aux->subprog_start;
	return env->insn_aux_data[insn_idx].indirect_target;
}
#endif /* CONFIG_BPF_JIT */
1586
1587 /* Base function for offset calculation. Needs to go into .text section,
1588 * therefore keeping it non-static as well; will also be used by JITs
1589 * anyway later on, so do not let the compiler omit it. This also needs
1590 * to go into kallsyms for correlation from e.g. bpftool, so naming
1591 * must not change.
1592 */
__bpf_call_base(u64 r1,u64 r2,u64 r3,u64 r4,u64 r5)1593 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1594 {
1595 return 0;
1596 }
1597 EXPORT_SYMBOL_GPL(__bpf_call_base);
1598
1599 /* All UAPI available opcodes. */
1600 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1601 /* 32 bit ALU operations. */ \
1602 /* Register based. */ \
1603 INSN_3(ALU, ADD, X), \
1604 INSN_3(ALU, SUB, X), \
1605 INSN_3(ALU, AND, X), \
1606 INSN_3(ALU, OR, X), \
1607 INSN_3(ALU, LSH, X), \
1608 INSN_3(ALU, RSH, X), \
1609 INSN_3(ALU, XOR, X), \
1610 INSN_3(ALU, MUL, X), \
1611 INSN_3(ALU, MOV, X), \
1612 INSN_3(ALU, ARSH, X), \
1613 INSN_3(ALU, DIV, X), \
1614 INSN_3(ALU, MOD, X), \
1615 INSN_2(ALU, NEG), \
1616 INSN_3(ALU, END, TO_BE), \
1617 INSN_3(ALU, END, TO_LE), \
1618 /* Immediate based. */ \
1619 INSN_3(ALU, ADD, K), \
1620 INSN_3(ALU, SUB, K), \
1621 INSN_3(ALU, AND, K), \
1622 INSN_3(ALU, OR, K), \
1623 INSN_3(ALU, LSH, K), \
1624 INSN_3(ALU, RSH, K), \
1625 INSN_3(ALU, XOR, K), \
1626 INSN_3(ALU, MUL, K), \
1627 INSN_3(ALU, MOV, K), \
1628 INSN_3(ALU, ARSH, K), \
1629 INSN_3(ALU, DIV, K), \
1630 INSN_3(ALU, MOD, K), \
1631 /* 64 bit ALU operations. */ \
1632 /* Register based. */ \
1633 INSN_3(ALU64, ADD, X), \
1634 INSN_3(ALU64, SUB, X), \
1635 INSN_3(ALU64, AND, X), \
1636 INSN_3(ALU64, OR, X), \
1637 INSN_3(ALU64, LSH, X), \
1638 INSN_3(ALU64, RSH, X), \
1639 INSN_3(ALU64, XOR, X), \
1640 INSN_3(ALU64, MUL, X), \
1641 INSN_3(ALU64, MOV, X), \
1642 INSN_3(ALU64, ARSH, X), \
1643 INSN_3(ALU64, DIV, X), \
1644 INSN_3(ALU64, MOD, X), \
1645 INSN_2(ALU64, NEG), \
1646 INSN_3(ALU64, END, TO_LE), \
1647 /* Immediate based. */ \
1648 INSN_3(ALU64, ADD, K), \
1649 INSN_3(ALU64, SUB, K), \
1650 INSN_3(ALU64, AND, K), \
1651 INSN_3(ALU64, OR, K), \
1652 INSN_3(ALU64, LSH, K), \
1653 INSN_3(ALU64, RSH, K), \
1654 INSN_3(ALU64, XOR, K), \
1655 INSN_3(ALU64, MUL, K), \
1656 INSN_3(ALU64, MOV, K), \
1657 INSN_3(ALU64, ARSH, K), \
1658 INSN_3(ALU64, DIV, K), \
1659 INSN_3(ALU64, MOD, K), \
1660 /* Call instruction. */ \
1661 INSN_2(JMP, CALL), \
1662 /* Exit instruction. */ \
1663 INSN_2(JMP, EXIT), \
1664 /* 32-bit Jump instructions. */ \
1665 /* Register based. */ \
1666 INSN_3(JMP32, JEQ, X), \
1667 INSN_3(JMP32, JNE, X), \
1668 INSN_3(JMP32, JGT, X), \
1669 INSN_3(JMP32, JLT, X), \
1670 INSN_3(JMP32, JGE, X), \
1671 INSN_3(JMP32, JLE, X), \
1672 INSN_3(JMP32, JSGT, X), \
1673 INSN_3(JMP32, JSLT, X), \
1674 INSN_3(JMP32, JSGE, X), \
1675 INSN_3(JMP32, JSLE, X), \
1676 INSN_3(JMP32, JSET, X), \
1677 /* Immediate based. */ \
1678 INSN_3(JMP32, JEQ, K), \
1679 INSN_3(JMP32, JNE, K), \
1680 INSN_3(JMP32, JGT, K), \
1681 INSN_3(JMP32, JLT, K), \
1682 INSN_3(JMP32, JGE, K), \
1683 INSN_3(JMP32, JLE, K), \
1684 INSN_3(JMP32, JSGT, K), \
1685 INSN_3(JMP32, JSLT, K), \
1686 INSN_3(JMP32, JSGE, K), \
1687 INSN_3(JMP32, JSLE, K), \
1688 INSN_3(JMP32, JSET, K), \
1689 /* Jump instructions. */ \
1690 /* Register based. */ \
1691 INSN_3(JMP, JEQ, X), \
1692 INSN_3(JMP, JNE, X), \
1693 INSN_3(JMP, JGT, X), \
1694 INSN_3(JMP, JLT, X), \
1695 INSN_3(JMP, JGE, X), \
1696 INSN_3(JMP, JLE, X), \
1697 INSN_3(JMP, JSGT, X), \
1698 INSN_3(JMP, JSLT, X), \
1699 INSN_3(JMP, JSGE, X), \
1700 INSN_3(JMP, JSLE, X), \
1701 INSN_3(JMP, JSET, X), \
1702 /* Immediate based. */ \
1703 INSN_3(JMP, JEQ, K), \
1704 INSN_3(JMP, JNE, K), \
1705 INSN_3(JMP, JGT, K), \
1706 INSN_3(JMP, JLT, K), \
1707 INSN_3(JMP, JGE, K), \
1708 INSN_3(JMP, JLE, K), \
1709 INSN_3(JMP, JSGT, K), \
1710 INSN_3(JMP, JSLT, K), \
1711 INSN_3(JMP, JSGE, K), \
1712 INSN_3(JMP, JSLE, K), \
1713 INSN_3(JMP, JSET, K), \
1714 INSN_2(JMP, JA), \
1715 INSN_2(JMP32, JA), \
1716 /* Atomic operations. */ \
1717 INSN_3(STX, ATOMIC, B), \
1718 INSN_3(STX, ATOMIC, H), \
1719 INSN_3(STX, ATOMIC, W), \
1720 INSN_3(STX, ATOMIC, DW), \
1721 /* Store instructions. */ \
1722 /* Register based. */ \
1723 INSN_3(STX, MEM, B), \
1724 INSN_3(STX, MEM, H), \
1725 INSN_3(STX, MEM, W), \
1726 INSN_3(STX, MEM, DW), \
1727 /* Immediate based. */ \
1728 INSN_3(ST, MEM, B), \
1729 INSN_3(ST, MEM, H), \
1730 INSN_3(ST, MEM, W), \
1731 INSN_3(ST, MEM, DW), \
1732 /* Load instructions. */ \
1733 /* Register based. */ \
1734 INSN_3(LDX, MEM, B), \
1735 INSN_3(LDX, MEM, H), \
1736 INSN_3(LDX, MEM, W), \
1737 INSN_3(LDX, MEM, DW), \
1738 INSN_3(LDX, MEMSX, B), \
1739 INSN_3(LDX, MEMSX, H), \
1740 INSN_3(LDX, MEMSX, W), \
1741 /* Immediate based. */ \
1742 INSN_3(LD, IMM, DW)
1743
bpf_opcode_in_insntable(u8 code)1744 bool bpf_opcode_in_insntable(u8 code)
1745 {
1746 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1747 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1748 static const bool public_insntable[256] = {
1749 [0 ... 255] = false,
1750 /* Now overwrite non-defaults ... */
1751 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1752 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1753 [BPF_LD | BPF_ABS | BPF_B] = true,
1754 [BPF_LD | BPF_ABS | BPF_H] = true,
1755 [BPF_LD | BPF_ABS | BPF_W] = true,
1756 [BPF_LD | BPF_IND | BPF_B] = true,
1757 [BPF_LD | BPF_IND | BPF_H] = true,
1758 [BPF_LD | BPF_IND | BPF_W] = true,
1759 [BPF_JMP | BPF_JA | BPF_X] = true,
1760 [BPF_JMP | BPF_JCOND] = true,
1761 };
1762 #undef BPF_INSN_3_TBL
1763 #undef BPF_INSN_2_TBL
1764 return public_insntable[code];
1765 }
1766
1767 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1768 /* Absolute value of s32 without undefined behavior for S32_MIN */
abs_s32(s32 x)1769 static u32 abs_s32(s32 x)
1770 {
1771 return x >= 0 ? (u32)x : -(u32)x;
1772 }
1773
1774 /**
1775 * ___bpf_prog_run - run eBPF program on a given context
1776 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1777 * @insn: is the array of eBPF instructions
1778 *
1779 * Decode and execute eBPF instructions.
1780 *
1781 * Return: whatever value is in %BPF_R0 at program exit
1782 */
___bpf_prog_run(u64 * regs,const struct bpf_insn * insn)1783 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1784 {
1785 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1786 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1787 static const void * const jumptable[256] __annotate_jump_table = {
1788 [0 ... 255] = &&default_label,
1789 /* Now overwrite non-defaults ... */
1790 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1791 /* Non-UAPI available opcodes. */
1792 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1793 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1794 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1795 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1796 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1797 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1798 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1799 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1800 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1801 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1802 };
1803 #undef BPF_INSN_3_LBL
1804 #undef BPF_INSN_2_LBL
1805 u32 tail_call_cnt = 0;
1806
1807 #define CONT ({ insn++; goto select_insn; })
1808 #define CONT_JMP ({ insn++; goto select_insn; })
1809
1810 select_insn:
1811 goto *jumptable[insn->code];
1812
1813 /* Explicitly mask the register-based shift amounts with 63 or 31
1814 * to avoid undefined behavior. Normally this won't affect the
1815 * generated code; for example, on native 64-bit archs such as
1816 * x86-64 or arm64, the compiler optimizes the AND away for the
1817 * interpreter. In case of JITs, each of the JIT backends compiles
1818 * the BPF shift operations to machine instructions which produce
1819 * implementation-defined results in such a case; the resulting
1820 * contents of the register may be arbitrary, but program behaviour
1821 * as a whole remains defined. In other words, in case of JIT backends,
1822 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1823 */
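/* Worked example of the masking above (illustrative): a 64-bit register
 * shift with SRC == 70 is interpreted as
 *
 *	DST = DST << (70 & 63);		i.e. DST << 6
 *
 * instead of the undefined DST << 70, while a JIT may leave the result
 * of the oversized shift implementation-defined.
 */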
1824 /* ALU (shifts) */
1825 #define SHT(OPCODE, OP) \
1826 ALU64_##OPCODE##_X: \
1827 DST = DST OP (SRC & 63); \
1828 CONT; \
1829 ALU_##OPCODE##_X: \
1830 DST = (u32) DST OP ((u32) SRC & 31); \
1831 CONT; \
1832 ALU64_##OPCODE##_K: \
1833 DST = DST OP IMM; \
1834 CONT; \
1835 ALU_##OPCODE##_K: \
1836 DST = (u32) DST OP (u32) IMM; \
1837 CONT;
1838 /* ALU (rest) */
1839 #define ALU(OPCODE, OP) \
1840 ALU64_##OPCODE##_X: \
1841 DST = DST OP SRC; \
1842 CONT; \
1843 ALU_##OPCODE##_X: \
1844 DST = (u32) DST OP (u32) SRC; \
1845 CONT; \
1846 ALU64_##OPCODE##_K: \
1847 DST = DST OP IMM; \
1848 CONT; \
1849 ALU_##OPCODE##_K: \
1850 DST = (u32) DST OP (u32) IMM; \
1851 CONT;
1852 ALU(ADD, +)
1853 ALU(SUB, -)
1854 ALU(AND, &)
1855 ALU(OR, |)
1856 ALU(XOR, ^)
1857 ALU(MUL, *)
1858 SHT(LSH, <<)
1859 SHT(RSH, >>)
1860 #undef SHT
1861 #undef ALU
1862 ALU_NEG:
1863 DST = (u32) -DST;
1864 CONT;
1865 ALU64_NEG:
1866 DST = -DST;
1867 CONT;
1868 ALU_MOV_X:
1869 switch (OFF) {
1870 case 0:
1871 DST = (u32) SRC;
1872 break;
1873 case 8:
1874 DST = (u32)(s8) SRC;
1875 break;
1876 case 16:
1877 DST = (u32)(s16) SRC;
1878 break;
1879 }
1880 CONT;
1881 ALU_MOV_K:
1882 DST = (u32) IMM;
1883 CONT;
1884 ALU64_MOV_X:
1885 switch (OFF) {
1886 case 0:
1887 DST = SRC;
1888 break;
1889 case 8:
1890 DST = (s8) SRC;
1891 break;
1892 case 16:
1893 DST = (s16) SRC;
1894 break;
1895 case 32:
1896 DST = (s32) SRC;
1897 break;
1898 }
1899 CONT;
1900 ALU64_MOV_K:
1901 DST = IMM;
1902 CONT;
1903 LD_IMM_DW:
1904 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1905 insn++;
1906 CONT;
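/* Illustrative encoding for the handler above: BPF_LD | BPF_IMM | BPF_DW
 * occupies two insn slots, so loading 0x1122334455667788 (example value)
 * is split as
 *
 *	insn[0].imm = 0x55667788;	low 32 bits
 *	insn[1].imm = 0x11223344;	high 32 bits
 *
 * which LD_IMM_DW recombines before skipping the second slot.
 */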
1907 ALU_ARSH_X:
1908 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1909 CONT;
1910 ALU_ARSH_K:
1911 DST = (u64) (u32) (((s32) DST) >> IMM);
1912 CONT;
1913 ALU64_ARSH_X:
1914 (*(s64 *) &DST) >>= (SRC & 63);
1915 CONT;
1916 ALU64_ARSH_K:
1917 (*(s64 *) &DST) >>= IMM;
1918 CONT;
1919 ALU64_MOD_X:
1920 switch (OFF) {
1921 case 0:
1922 div64_u64_rem(DST, SRC, &AX);
1923 DST = AX;
1924 break;
1925 case 1:
1926 AX = div64_s64(DST, SRC);
1927 DST = DST - AX * SRC;
1928 break;
1929 }
1930 CONT;
1931 ALU_MOD_X:
1932 switch (OFF) {
1933 case 0:
1934 AX = (u32) DST;
1935 DST = do_div(AX, (u32) SRC);
1936 break;
1937 case 1:
1938 AX = abs_s32((s32)DST);
1939 AX = do_div(AX, abs_s32((s32)SRC));
1940 if ((s32)DST < 0)
1941 DST = (u32)-AX;
1942 else
1943 DST = (u32)AX;
1944 break;
1945 }
1946 CONT;
1947 ALU64_MOD_K:
1948 switch (OFF) {
1949 case 0:
1950 div64_u64_rem(DST, IMM, &AX);
1951 DST = AX;
1952 break;
1953 case 1:
1954 AX = div64_s64(DST, IMM);
1955 DST = DST - AX * IMM;
1956 break;
1957 }
1958 CONT;
1959 ALU_MOD_K:
1960 switch (OFF) {
1961 case 0:
1962 AX = (u32) DST;
1963 DST = do_div(AX, (u32) IMM);
1964 break;
1965 case 1:
1966 AX = abs_s32((s32)DST);
1967 AX = do_div(AX, abs_s32((s32)IMM));
1968 if ((s32)DST < 0)
1969 DST = (u32)-AX;
1970 else
1971 DST = (u32)AX;
1972 break;
1973 }
1974 CONT;
1975 ALU64_DIV_X:
1976 switch (OFF) {
1977 case 0:
1978 DST = div64_u64(DST, SRC);
1979 break;
1980 case 1:
1981 DST = div64_s64(DST, SRC);
1982 break;
1983 }
1984 CONT;
1985 ALU_DIV_X:
1986 switch (OFF) {
1987 case 0:
1988 AX = (u32) DST;
1989 do_div(AX, (u32) SRC);
1990 DST = (u32) AX;
1991 break;
1992 case 1:
1993 AX = abs_s32((s32)DST);
1994 do_div(AX, abs_s32((s32)SRC));
1995 if (((s32)DST < 0) == ((s32)SRC < 0))
1996 DST = (u32)AX;
1997 else
1998 DST = (u32)-AX;
1999 break;
2000 }
2001 CONT;
2002 ALU64_DIV_K:
2003 switch (OFF) {
2004 case 0:
2005 DST = div64_u64(DST, IMM);
2006 break;
2007 case 1:
2008 DST = div64_s64(DST, IMM);
2009 break;
2010 }
2011 CONT;
2012 ALU_DIV_K:
2013 switch (OFF) {
2014 case 0:
2015 AX = (u32) DST;
2016 do_div(AX, (u32) IMM);
2017 DST = (u32) AX;
2018 break;
2019 case 1:
2020 AX = abs_s32((s32)DST);
2021 do_div(AX, abs_s32((s32)IMM));
2022 if (((s32)DST < 0) == ((s32)IMM < 0))
2023 DST = (u32)AX;
2024 else
2025 DST = (u32)-AX;
2026 break;
2027 }
2028 CONT;
2029 ALU_END_TO_BE:
2030 switch (IMM) {
2031 case 16:
2032 DST = (__force u16) cpu_to_be16(DST);
2033 break;
2034 case 32:
2035 DST = (__force u32) cpu_to_be32(DST);
2036 break;
2037 case 64:
2038 DST = (__force u64) cpu_to_be64(DST);
2039 break;
2040 }
2041 CONT;
2042 ALU_END_TO_LE:
2043 switch (IMM) {
2044 case 16:
2045 DST = (__force u16) cpu_to_le16(DST);
2046 break;
2047 case 32:
2048 DST = (__force u32) cpu_to_le32(DST);
2049 break;
2050 case 64:
2051 DST = (__force u64) cpu_to_le64(DST);
2052 break;
2053 }
2054 CONT;
2055 ALU64_END_TO_LE:
2056 switch (IMM) {
2057 case 16:
2058 DST = (__force u16) __swab16(DST);
2059 break;
2060 case 32:
2061 DST = (__force u32) __swab32(DST);
2062 break;
2063 case 64:
2064 DST = (__force u64) __swab64(DST);
2065 break;
2066 }
2067 CONT;
2068
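/* Worked example (assumes a little-endian host): BPF_ALU | BPF_END |
 * BPF_TO_BE with IMM == 16 turns DST == 0x1234 into 0x3412, and is a
 * no-op on big-endian hosts. The BPF_ALU64 variant above byte-swaps
 * unconditionally via __swab{16,32,64}() regardless of endianness.
 */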
2069 /* CALL */
2070 JMP_CALL:
2071 /* Function call scratches BPF_R1-BPF_R5 registers,
2072 * preserves BPF_R6-BPF_R9, and stores return value
2073 * into BPF_R0.
2074 */
2075 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
2076 BPF_R4, BPF_R5);
2077 CONT;
2078
2079 JMP_CALL_ARGS:
2080 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
2081 BPF_R3, BPF_R4,
2082 BPF_R5,
2083 insn + insn->off + 1);
2084 CONT;
2085
2086 JMP_TAIL_CALL: {
2087 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
2088 struct bpf_array *array = container_of(map, struct bpf_array, map);
2089 struct bpf_prog *prog;
2090 u32 index = BPF_R3;
2091
2092 if (unlikely(index >= array->map.max_entries))
2093 goto out;
2094
2095 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
2096 goto out;
2097
2098 prog = READ_ONCE(array->ptrs[index]);
2099 if (!prog)
2100 goto out;
2101
2102 tail_call_cnt++;
2103
2104 /* ARG1 at this point is guaranteed to point to CTX from
2105 * the verifier side due to the fact that the tail call is
2106 * handled like a helper, that is, bpf_tail_call_proto,
2107 * where arg1_type is ARG_PTR_TO_CTX.
2108 */
2109 insn = prog->insnsi;
2110 goto select_insn;
2111 out:
2112 CONT;
2113 }
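/* Usage sketch from the BPF program side (illustrative, not kernel
 * code; the map name is hypothetical): the path above is entered via
 * the tail call helper, e.g.
 *
 *	bpf_tail_call(ctx, &prog_array_map, 3);
 *
 * On success control never returns to the caller; on a missing entry,
 * an out-of-range index or an exhausted MAX_TAIL_CALL_CNT budget the
 * call falls through and the caller continues executing.
 */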
2114 JMP_JA:
2115 insn += insn->off;
2116 CONT;
2117 JMP32_JA:
2118 insn += insn->imm;
2119 CONT;
2120 JMP_EXIT:
2121 return BPF_R0;
2122 /* JMP */
2123 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
2124 JMP_##OPCODE##_X: \
2125 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
2126 insn += insn->off; \
2127 CONT_JMP; \
2128 } \
2129 CONT; \
2130 JMP32_##OPCODE##_X: \
2131 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
2132 insn += insn->off; \
2133 CONT_JMP; \
2134 } \
2135 CONT; \
2136 JMP_##OPCODE##_K: \
2137 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
2138 insn += insn->off; \
2139 CONT_JMP; \
2140 } \
2141 CONT; \
2142 JMP32_##OPCODE##_K: \
2143 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
2144 insn += insn->off; \
2145 CONT_JMP; \
2146 } \
2147 CONT;
2148 COND_JMP(u, JEQ, ==)
2149 COND_JMP(u, JNE, !=)
2150 COND_JMP(u, JGT, >)
2151 COND_JMP(u, JLT, <)
2152 COND_JMP(u, JGE, >=)
2153 COND_JMP(u, JLE, <=)
2154 COND_JMP(u, JSET, &)
2155 COND_JMP(s, JSGT, >)
2156 COND_JMP(s, JSLT, <)
2157 COND_JMP(s, JSGE, >=)
2158 COND_JMP(s, JSLE, <=)
2159 #undef COND_JMP
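/* For reference, one expansion of the macro above: COND_JMP(u, JGT, >)
 * emits, among its four variants,
 *
 *	JMP_JGT_X:
 *		if ((u64) DST > (u64) SRC) {
 *			insn += insn->off;
 *			CONT_JMP;
 *		}
 *		CONT;
 *
 * plus the JMP32 and _K forms using 32-bit casts and IMM instead of SRC.
 */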
2160 /* ST, STX and LDX */
2161 ST_NOSPEC:
2162 /* Speculation barrier for mitigating Speculative Store Bypass,
2163 * Bounds-Check Bypass and Type Confusion. In case of arm64, we
2164 * rely on the firmware mitigation as controlled via the ssbd
2165 * kernel parameter. Whenever the mitigation is enabled, it
2166 * works for all of the kernel code with no need to provide any
2167 * additional instructions here. In case of x86, we use 'lfence'
2168 * insn for mitigation. We reuse preexisting logic from Spectre
2169 * v1 mitigation that happens to produce the required code on
2170 * x86 for v4 as well.
2171 */
2172 barrier_nospec();
2173 CONT;
2174 #define LDST(SIZEOP, SIZE) \
2175 STX_MEM_##SIZEOP: \
2176 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
2177 CONT; \
2178 ST_MEM_##SIZEOP: \
2179 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
2180 CONT; \
2181 LDX_MEM_##SIZEOP: \
2182 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2183 CONT; \
2184 LDX_PROBE_MEM_##SIZEOP: \
2185 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2186 (const void *)(long) (SRC + insn->off)); \
2187 DST = *((SIZE *)&DST); \
2188 CONT;
2189
2190 LDST(B, u8)
2191 LDST(H, u16)
2192 LDST(W, u32)
2193 LDST(DW, u64)
2194 #undef LDST
2195
2196 #define LDSX(SIZEOP, SIZE) \
2197 LDX_MEMSX_##SIZEOP: \
2198 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2199 CONT; \
2200 LDX_PROBE_MEMSX_##SIZEOP: \
2201 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2202 (const void *)(long) (SRC + insn->off)); \
2203 DST = *((SIZE *)&DST); \
2204 CONT;
2205
2206 LDSX(B, s8)
2207 LDSX(H, s16)
2208 LDSX(W, s32)
2209 #undef LDSX
2210
2211 #define ATOMIC_ALU_OP(BOP, KOP) \
2212 case BOP: \
2213 if (BPF_SIZE(insn->code) == BPF_W) \
2214 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2215 (DST + insn->off)); \
2216 else if (BPF_SIZE(insn->code) == BPF_DW) \
2217 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2218 (DST + insn->off)); \
2219 else \
2220 goto default_label; \
2221 break; \
2222 case BOP | BPF_FETCH: \
2223 if (BPF_SIZE(insn->code) == BPF_W) \
2224 SRC = (u32) atomic_fetch_##KOP( \
2225 (u32) SRC, \
2226 (atomic_t *)(unsigned long) (DST + insn->off)); \
2227 else if (BPF_SIZE(insn->code) == BPF_DW) \
2228 SRC = (u64) atomic64_fetch_##KOP( \
2229 (u64) SRC, \
2230 (atomic64_t *)(unsigned long) (DST + insn->off)); \
2231 else \
2232 goto default_label; \
2233 break;
2234
2235 STX_ATOMIC_DW:
2236 STX_ATOMIC_W:
2237 STX_ATOMIC_H:
2238 STX_ATOMIC_B:
2239 switch (IMM) {
2240 /* Atomic read-modify-write instructions support only W and DW
2241 * size modifiers.
2242 */
2243 ATOMIC_ALU_OP(BPF_ADD, add)
2244 ATOMIC_ALU_OP(BPF_AND, and)
2245 ATOMIC_ALU_OP(BPF_OR, or)
2246 ATOMIC_ALU_OP(BPF_XOR, xor)
2247 #undef ATOMIC_ALU_OP
2248
2249 case BPF_XCHG:
2250 if (BPF_SIZE(insn->code) == BPF_W)
2251 SRC = (u32) atomic_xchg(
2252 (atomic_t *)(unsigned long) (DST + insn->off),
2253 (u32) SRC);
2254 else if (BPF_SIZE(insn->code) == BPF_DW)
2255 SRC = (u64) atomic64_xchg(
2256 (atomic64_t *)(unsigned long) (DST + insn->off),
2257 (u64) SRC);
2258 else
2259 goto default_label;
2260 break;
2261 case BPF_CMPXCHG:
2262 if (BPF_SIZE(insn->code) == BPF_W)
2263 BPF_R0 = (u32) atomic_cmpxchg(
2264 (atomic_t *)(unsigned long) (DST + insn->off),
2265 (u32) BPF_R0, (u32) SRC);
2266 else if (BPF_SIZE(insn->code) == BPF_DW)
2267 BPF_R0 = (u64) atomic64_cmpxchg(
2268 (atomic64_t *)(unsigned long) (DST + insn->off),
2269 (u64) BPF_R0, (u64) SRC);
2270 else
2271 goto default_label;
2272 break;
2273 /* Atomic load and store instructions support all size
2274 * modifiers.
2275 */
2276 case BPF_LOAD_ACQ:
2277 switch (BPF_SIZE(insn->code)) {
2278 #define LOAD_ACQUIRE(SIZEOP, SIZE) \
2279 case BPF_##SIZEOP: \
2280 DST = (SIZE)smp_load_acquire( \
2281 (SIZE *)(unsigned long)(SRC + insn->off)); \
2282 break;
2283 LOAD_ACQUIRE(B, u8)
2284 LOAD_ACQUIRE(H, u16)
2285 LOAD_ACQUIRE(W, u32)
2286 #ifdef CONFIG_64BIT
2287 LOAD_ACQUIRE(DW, u64)
2288 #endif
2289 #undef LOAD_ACQUIRE
2290 default:
2291 goto default_label;
2292 }
2293 break;
2294 case BPF_STORE_REL:
2295 switch (BPF_SIZE(insn->code)) {
2296 #define STORE_RELEASE(SIZEOP, SIZE) \
2297 case BPF_##SIZEOP: \
2298 smp_store_release( \
2299 (SIZE *)(unsigned long)(DST + insn->off), (SIZE)SRC); \
2300 break;
2301 STORE_RELEASE(B, u8)
2302 STORE_RELEASE(H, u16)
2303 STORE_RELEASE(W, u32)
2304 #ifdef CONFIG_64BIT
2305 STORE_RELEASE(DW, u64)
2306 #endif
2307 #undef STORE_RELEASE
2308 default:
2309 goto default_label;
2310 }
2311 break;
2312
2313 default:
2314 goto default_label;
2315 }
2316 CONT;
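/* Encoding example (illustrative): a 64-bit atomic fetch-and-add, i.e.
 * src = atomic64_fetch_add(src, (atomic64_t *)(dst + off)), reaches the
 * switch above as
 *
 *	insn->code = BPF_STX | BPF_ATOMIC | BPF_DW;
 *	insn->imm  = BPF_ADD | BPF_FETCH;
 */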
2317
2318 default_label:
2319 /* If we ever reach this, we have a bug somewhere. Die hard here
2320 * instead of just returning 0; we could be somewhere in a subprog,
2321 * so execution could otherwise continue, which we do /not/ want.
2322 *
2323 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
2324 */
2325 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2326 insn->code, insn->imm);
2327 BUG_ON(1);
2328 return 0;
2329 }
2330
2331 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2332 #define DEFINE_BPF_PROG_RUN(stack_size) \
2333 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2334 { \
2335 u64 stack[stack_size / sizeof(u64)]; \
2336 u64 regs[MAX_BPF_EXT_REG] = {}; \
2337 \
2338 kmsan_unpoison_memory(stack, sizeof(stack)); \
2339 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2340 ARG1 = (u64) (unsigned long) ctx; \
2341 return ___bpf_prog_run(regs, insn); \
2342 }
2343
2344 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2345 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2346 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2347 const struct bpf_insn *insn) \
2348 { \
2349 u64 stack[stack_size / sizeof(u64)]; \
2350 u64 regs[MAX_BPF_EXT_REG]; \
2351 \
2352 kmsan_unpoison_memory(stack, sizeof(stack)); \
2353 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2354 BPF_R1 = r1; \
2355 BPF_R2 = r2; \
2356 BPF_R3 = r3; \
2357 BPF_R4 = r4; \
2358 BPF_R5 = r5; \
2359 return ___bpf_prog_run(regs, insn); \
2360 }
2361
2362 #define EVAL1(FN, X) FN(X)
2363 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2364 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2365 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2366 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2367 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2368
2369 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2370 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2371 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2372
2373 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2374 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2375 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
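/* For reference, the EVAL chains above stamp out one interpreter per
 * 32-byte stack bucket; e.g. DEFINE_BPF_PROG_RUN(32) expands roughly to
 *
 *	static unsigned int __bpf_prog_run32(const void *ctx,
 *					     const struct bpf_insn *insn)
 *	{
 *		u64 stack[32 / sizeof(u64)];
 *		...
 *	}
 *
 * and likewise for all sizes up to __bpf_prog_run512().
 */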
2376
2377 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2378
2379 static unsigned int (*interpreters[])(const void *ctx,
2380 const struct bpf_insn *insn) = {
2381 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2382 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2383 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2384 };
2385 #undef PROG_NAME_LIST
2386 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2387 static __maybe_unused
2388 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2389 const struct bpf_insn *insn) = {
2390 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2391 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2392 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2393 };
2394 #undef PROG_NAME_LIST
2395
2396 #ifdef CONFIG_BPF_SYSCALL
2397 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2398 {
2399 stack_depth = max_t(u32, stack_depth, 1);
2400 insn->off = (s16) insn->imm;
2401 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2402 __bpf_call_base_args;
2403 insn->code = BPF_JMP | BPF_CALL_ARGS;
2404 }
2405 #endif
2406 #endif
2407
2408 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2409 const struct bpf_insn *insn)
2410 {
2411 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2412 * is not working properly, so warn about it!
2413 */
2414 WARN_ON_ONCE(1);
2415 return 0;
2416 }
2417
2418 static bool __bpf_prog_map_compatible(struct bpf_map *map,
2419 const struct bpf_prog *fp)
2420 {
2421 enum bpf_prog_type prog_type = resolve_prog_type(fp);
2422 struct bpf_prog_aux *aux = fp->aux;
2423 enum bpf_cgroup_storage_type i;
2424 bool ret = false;
2425 u64 cookie;
2426
2427 if (fp->kprobe_override)
2428 return ret;
2429
2430 spin_lock(&map->owner_lock);
2431 /* There's no owner yet where we could check for compatibility. */
2432 if (!map->owner) {
2433 map->owner = bpf_map_owner_alloc(map);
2434 if (!map->owner)
2435 goto err;
2436 map->owner->type = prog_type;
2437 map->owner->jited = fp->jited;
2438 map->owner->xdp_has_frags = aux->xdp_has_frags;
2439 map->owner->sleepable = fp->sleepable;
2440 map->owner->expected_attach_type = fp->expected_attach_type;
2441 map->owner->attach_func_proto = aux->attach_func_proto;
2442 for_each_cgroup_storage_type(i) {
2443 map->owner->storage_cookie[i] =
2444 aux->cgroup_storage[i] ?
2445 aux->cgroup_storage[i]->cookie : 0;
2446 }
2447 ret = true;
2448 } else {
2449 ret = map->owner->type == prog_type &&
2450 map->owner->jited == fp->jited &&
2451 map->owner->xdp_has_frags == aux->xdp_has_frags &&
2452 map->owner->sleepable == fp->sleepable;
2453 if (ret &&
2454 map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
2455 map->owner->expected_attach_type != fp->expected_attach_type)
2456 ret = false;
2457 for_each_cgroup_storage_type(i) {
2458 if (!ret)
2459 break;
2460 cookie = aux->cgroup_storage[i] ?
2461 aux->cgroup_storage[i]->cookie : 0;
2462 ret = map->owner->storage_cookie[i] == cookie ||
2463 !cookie;
2464 }
2465 if (ret &&
2466 map->owner->attach_func_proto != aux->attach_func_proto) {
2467 switch (prog_type) {
2468 case BPF_PROG_TYPE_TRACING:
2469 case BPF_PROG_TYPE_LSM:
2470 case BPF_PROG_TYPE_EXT:
2471 case BPF_PROG_TYPE_STRUCT_OPS:
2472 ret = false;
2473 break;
2474 default:
2475 break;
2476 }
2477 }
2478 }
2479 err:
2480 spin_unlock(&map->owner_lock);
2481 return ret;
2482 }
2483
2484 bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
2485 {
2486 /* XDP programs inserted into maps are not guaranteed to run on
2487 * a particular netdev (and can run outside driver context entirely
2488 * in the case of devmap and cpumap). Until device checks
2489 * are implemented, prohibit adding dev-bound programs to program maps.
2490 */
2491 if (bpf_prog_is_dev_bound(fp->aux))
2492 return false;
2493
2494 return __bpf_prog_map_compatible(map, fp);
2495 }
2496
2497 static int bpf_check_tail_call(const struct bpf_prog *fp)
2498 {
2499 struct bpf_prog_aux *aux = fp->aux;
2500 int i, ret = 0;
2501
2502 mutex_lock(&aux->used_maps_mutex);
2503 for (i = 0; i < aux->used_map_cnt; i++) {
2504 struct bpf_map *map = aux->used_maps[i];
2505
2506 if (!map_type_contains_progs(map))
2507 continue;
2508
2509 if (!__bpf_prog_map_compatible(map, fp)) {
2510 ret = -EINVAL;
2511 goto out;
2512 }
2513 }
2514
2515 out:
2516 mutex_unlock(&aux->used_maps_mutex);
2517 return ret;
2518 }
2519
2520 static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
2521 {
2522 bool select_interpreter = false;
2523 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2524 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2525 u32 idx = (round_up(stack_depth, 32) / 32) - 1;
2526
2527 /* may_goto may cause stack size > 512, leading to idx out-of-bounds.
2528 * But for non-JITed programs, we don't need bpf_func, so no bounds
2529 * check needed.
2530 */
2531 if (idx < ARRAY_SIZE(interpreters)) {
2532 fp->bpf_func = interpreters[idx];
2533 select_interpreter = true;
2534 } else {
2535 fp->bpf_func = __bpf_prog_ret0_warn;
2536 }
2537 #else
2538 fp->bpf_func = __bpf_prog_ret0_warn;
2539 #endif
2540 return select_interpreter;
2541 }
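/* Worked example of the bucket selection above (illustrative): a prog
 * with aux->stack_depth == 96 yields round_up(96, 32) / 32 - 1 == 2,
 * i.e. interpreters[2] == __bpf_prog_run96, while stack_depth == 100
 * rounds up to the 128-byte variant at interpreters[3].
 */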
2542
2543 static struct bpf_prog *bpf_prog_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
2544 {
2545 #ifdef CONFIG_BPF_JIT
2546 struct bpf_prog *orig_prog;
2547 struct bpf_insn_aux_data *orig_insn_aux;
2548
2549 if (!bpf_prog_need_blind(prog))
2550 return bpf_int_jit_compile(env, prog);
2551
2552 if (env) {
2553 /*
2554 * If env is not NULL, we are called from the end of bpf_check(). At this
2555 * point, only insn_aux_data is used after failure, so it must be restored
2556 * on failure.
2557 */
2558 orig_insn_aux = bpf_dup_insn_aux_data(env);
2559 if (!orig_insn_aux)
2560 return prog;
2561 }
2562
2563 orig_prog = prog;
2564 prog = bpf_jit_blind_constants(env, prog);
2565 /*
2566 * If blinding was requested and we failed during blinding, we must fall
2567 * back to the interpreter.
2568 */
2569 if (IS_ERR(prog))
2570 goto out_restore;
2571
2572 prog = bpf_int_jit_compile(env, prog);
2573 if (prog->jited) {
2574 bpf_jit_prog_release_other(prog, orig_prog);
2575 if (env)
2576 vfree(orig_insn_aux);
2577 return prog;
2578 }
2579
2580 bpf_jit_prog_release_other(orig_prog, prog);
2581
2582 out_restore:
2583 prog = orig_prog;
2584 if (env)
2585 bpf_restore_insn_aux_data(env, orig_insn_aux);
2586 #endif
2587 return prog;
2588 }
2589
2590 struct bpf_prog *__bpf_prog_select_runtime(struct bpf_verifier_env *env, struct bpf_prog *fp,
2591 int *err)
2592 {
2593 /* In case of BPF to BPF calls, the verifier did all the prep
2594 * work with regard to JITing, etc.
2595 */
2596 bool jit_needed = false;
2597
2598 if (fp->bpf_func)
2599 goto finalize;
2600
2601 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2602 bpf_prog_has_kfunc_call(fp))
2603 jit_needed = true;
2604
2605 if (!bpf_prog_select_interpreter(fp))
2606 jit_needed = true;
2607
2608 /* eBPF JITs can rewrite the program in case constant
2609 * blinding is active. However, in case of error during
2610 * blinding, bpf_int_jit_compile() must always return a
2611 * valid program, which in that case is simply not
2612 * JITed and falls back to the interpreter.
2613 */
2614 if (!bpf_prog_is_offloaded(fp->aux)) {
2615 *err = bpf_prog_alloc_jited_linfo(fp);
2616 if (*err)
2617 return fp;
2618
2619 fp = bpf_prog_jit_compile(env, fp);
2620 bpf_prog_jit_attempt_done(fp);
2621 if (!fp->jited && jit_needed) {
2622 *err = -ENOTSUPP;
2623 return fp;
2624 }
2625 } else {
2626 *err = bpf_prog_offload_compile(fp);
2627 if (*err)
2628 return fp;
2629 }
2630
2631 finalize:
2632 *err = bpf_prog_lock_ro(fp);
2633 if (*err)
2634 return fp;
2635
2636 /* The tail call compatibility check can only be done at
2637 * this late stage, as we need to determine whether we deal
2638 * with JITed or non-JITed program concatenations, and not
2639 * all eBPF JITs might immediately support all features.
2640 */
2641 *err = bpf_check_tail_call(fp);
2642
2643 return fp;
2644 }
2645
2646 /**
2647 * bpf_prog_select_runtime - select exec runtime for BPF program
2648 * @fp: bpf_prog populated with BPF program
2649 * @err: pointer to error variable
2650 *
2651 * Try to JIT eBPF program, if JIT is not available, use interpreter.
2652 * The BPF program will be executed via bpf_prog_run() function.
2653 *
2654 * Return: the &fp argument along with &err set to 0 for success or
2655 * a negative errno code on failure
2656 */
2657 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2658 {
2659 return __bpf_prog_select_runtime(NULL, fp, err);
2660 }
2661 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
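/* Typical call site (illustrative sketch; the actual callers live in
 * the syscall/verifier code):
 *
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	if (err < 0)
 *		goto free_prog;
 *
 * where fp remains valid on failure and must still be freed by the
 * caller.
 */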
2662
2663 static unsigned int __bpf_prog_ret1(const void *ctx,
2664 const struct bpf_insn *insn)
2665 {
2666 return 1;
2667 }
2668
2669 static struct bpf_prog_dummy {
2670 struct bpf_prog prog;
2671 } dummy_bpf_prog = {
2672 .prog = {
2673 .bpf_func = __bpf_prog_ret1,
2674 },
2675 };
2676
2677 struct bpf_prog_array bpf_empty_prog_array = {
2678 .items = {
2679 { .prog = NULL },
2680 },
2681 };
2682 EXPORT_SYMBOL(bpf_empty_prog_array);
2683
2684 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2685 {
2686 struct bpf_prog_array *p;
2687
2688 if (prog_cnt)
2689 p = kzalloc_flex(*p, items, prog_cnt + 1, flags);
2690 else
2691 p = &bpf_empty_prog_array;
2692
2693 return p;
2694 }
2695
2696 void bpf_prog_array_free(struct bpf_prog_array *progs)
2697 {
2698 if (!progs || progs == &bpf_empty_prog_array)
2699 return;
2700 kfree_rcu(progs, rcu);
2701 }
2702
2703 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2704 {
2705 struct bpf_prog_array *progs;
2706
2707 /*
2708 * An RCU Tasks Trace grace period implies an RCU grace period, so there
2709 * is no need to call kfree_rcu(); just call kfree() directly.
2710 */
2711 progs = container_of(rcu, struct bpf_prog_array, rcu);
2712 kfree(progs);
2713 }
2714
2715 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2716 {
2717 if (!progs || progs == &bpf_empty_prog_array)
2718 return;
2719 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2720 }
2721
2722 int bpf_prog_array_length(struct bpf_prog_array *array)
2723 {
2724 struct bpf_prog_array_item *item;
2725 u32 cnt = 0;
2726
2727 for (item = array->items; item->prog; item++)
2728 if (item->prog != &dummy_bpf_prog.prog)
2729 cnt++;
2730 return cnt;
2731 }
2732
2733 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2734 {
2735 struct bpf_prog_array_item *item;
2736
2737 for (item = array->items; item->prog; item++)
2738 if (item->prog != &dummy_bpf_prog.prog)
2739 return false;
2740 return true;
2741 }
2742
2743 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2744 u32 *prog_ids,
2745 u32 request_cnt)
2746 {
2747 struct bpf_prog_array_item *item;
2748 int i = 0;
2749
2750 for (item = array->items; item->prog; item++) {
2751 if (item->prog == &dummy_bpf_prog.prog)
2752 continue;
2753 prog_ids[i] = item->prog->aux->id;
2754 if (++i == request_cnt) {
2755 item++;
2756 break;
2757 }
2758 }
2759
2760 return !!(item->prog);
2761 }
2762
2763 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2764 __u32 __user *prog_ids, u32 cnt)
2765 {
2766 unsigned long err = 0;
2767 bool nospc;
2768 u32 *ids;
2769
2770 /* users of this function are doing:
2771 * cnt = bpf_prog_array_length();
2772 * if (cnt > 0)
2773 * bpf_prog_array_copy_to_user(..., cnt);
2774 * so the kcalloc below doesn't need an extra cnt > 0 check.
2775 */
2776 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2777 if (!ids)
2778 return -ENOMEM;
2779 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2780 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2781 kfree(ids);
2782 if (err)
2783 return -EFAULT;
2784 if (nospc)
2785 return -ENOSPC;
2786 return 0;
2787 }
2788
2789 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2790 struct bpf_prog *old_prog)
2791 {
2792 struct bpf_prog_array_item *item;
2793
2794 for (item = array->items; item->prog; item++)
2795 if (item->prog == old_prog) {
2796 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2797 break;
2798 }
2799 }
2800
2801 /**
2802 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2803 * index into the program array with
2804 * a dummy no-op program.
2805 * @array: a bpf_prog_array
2806 * @index: the index of the program to replace
2807 *
2808 * Skips over dummy programs, by not counting them, when calculating
2809 * the position of the program to replace.
2810 *
2811 * Return:
2812 * * 0 - Success
2813 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2814 * * -ENOENT - Index out of range
2815 */
2816 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2817 {
2818 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2819 }
2820
2821 /**
2822 * bpf_prog_array_update_at() - Updates the program at the given index
2823 * into the program array.
2824 * @array: a bpf_prog_array
2825 * @index: the index of the program to update
2826 * @prog: the program to insert into the array
2827 *
2828 * Skips over dummy programs, by not counting them, when calculating
2829 * the position of the program to update.
2830 *
2831 * Return:
2832 * * 0 - Success
2833 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2834 * * -ENOENT - Index out of range
2835 */
2836 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2837 struct bpf_prog *prog)
2838 {
2839 struct bpf_prog_array_item *item;
2840
2841 if (unlikely(index < 0))
2842 return -EINVAL;
2843
2844 for (item = array->items; item->prog; item++) {
2845 if (item->prog == &dummy_bpf_prog.prog)
2846 continue;
2847 if (!index) {
2848 WRITE_ONCE(item->prog, prog);
2849 return 0;
2850 }
2851 index--;
2852 }
2853 return -ENOENT;
2854 }
2855
2856 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2857 struct bpf_prog *exclude_prog,
2858 struct bpf_prog *include_prog,
2859 u64 bpf_cookie,
2860 struct bpf_prog_array **new_array)
2861 {
2862 int new_prog_cnt, carry_prog_cnt = 0;
2863 struct bpf_prog_array_item *existing, *new;
2864 struct bpf_prog_array *array;
2865 bool found_exclude = false;
2866
2867 /* Figure out how many existing progs we need to carry over to
2868 * the new array.
2869 */
2870 if (old_array) {
2871 existing = old_array->items;
2872 for (; existing->prog; existing++) {
2873 if (existing->prog == exclude_prog) {
2874 found_exclude = true;
2875 continue;
2876 }
2877 if (existing->prog != &dummy_bpf_prog.prog)
2878 carry_prog_cnt++;
2879 if (existing->prog == include_prog)
2880 return -EEXIST;
2881 }
2882 }
2883
2884 if (exclude_prog && !found_exclude)
2885 return -ENOENT;
2886
2887 /* How many progs (not NULL) will be in the new array? */
2888 new_prog_cnt = carry_prog_cnt;
2889 if (include_prog)
2890 new_prog_cnt += 1;
2891
2892 /* Do we have any prog (not NULL) in the new array? */
2893 if (!new_prog_cnt) {
2894 *new_array = NULL;
2895 return 0;
2896 }
2897
2898 /* +1 as the end of prog_array is marked with NULL */
2899 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2900 if (!array)
2901 return -ENOMEM;
2902 new = array->items;
2903
2904 /* Fill in the new prog array */
2905 if (carry_prog_cnt) {
2906 existing = old_array->items;
2907 for (; existing->prog; existing++) {
2908 if (existing->prog == exclude_prog ||
2909 existing->prog == &dummy_bpf_prog.prog)
2910 continue;
2911
2912 new->prog = existing->prog;
2913 new->bpf_cookie = existing->bpf_cookie;
2914 new++;
2915 }
2916 }
2917 if (include_prog) {
2918 new->prog = include_prog;
2919 new->bpf_cookie = bpf_cookie;
2920 new++;
2921 }
2922 new->prog = NULL;
2923 *new_array = array;
2924 return 0;
2925 }
2926
2927 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2928 u32 *prog_ids, u32 request_cnt,
2929 u32 *prog_cnt)
2930 {
2931 u32 cnt = 0;
2932
2933 if (array)
2934 cnt = bpf_prog_array_length(array);
2935
2936 *prog_cnt = cnt;
2937
2938 /* return early if user requested only program count or nothing to copy */
2939 if (!request_cnt || !cnt)
2940 return 0;
2941
2942 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2943 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2944 : 0;
2945 }
2946
2947 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2948 struct bpf_map **used_maps, u32 len)
2949 {
2950 struct bpf_map *map;
2951 bool sleepable;
2952 u32 i;
2953
2954 sleepable = aux->prog->sleepable;
2955 for (i = 0; i < len; i++) {
2956 map = used_maps[i];
2957 if (map->ops->map_poke_untrack)
2958 map->ops->map_poke_untrack(map, aux);
2959 if (sleepable)
2960 atomic64_dec(&map->sleepable_refcnt);
2961 bpf_map_put(map);
2962 }
2963 }
2964
2965 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2966 {
2967 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2968 kfree(aux->used_maps);
2969 }
2970
2971 void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
2972 {
2973 #ifdef CONFIG_BPF_SYSCALL
2974 struct btf_mod_pair *btf_mod;
2975 u32 i;
2976
2977 for (i = 0; i < len; i++) {
2978 btf_mod = &used_btfs[i];
2979 if (btf_mod->module)
2980 module_put(btf_mod->module);
2981 btf_put(btf_mod->btf);
2982 }
2983 #endif
2984 }
2985
2986 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2987 {
2988 __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt);
2989 kfree(aux->used_btfs);
2990 }
2991
2992 static void bpf_prog_free_deferred(struct work_struct *work)
2993 {
2994 struct bpf_prog_aux *aux;
2995 int i;
2996
2997 aux = container_of(work, struct bpf_prog_aux, work);
2998 #ifdef CONFIG_BPF_SYSCALL
2999 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
3000 bpf_prog_stream_free(aux->prog);
3001 #endif
3002 #ifdef CONFIG_CGROUP_BPF
3003 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
3004 bpf_cgroup_atype_put(aux->cgroup_atype);
3005 #endif
3006 bpf_free_used_maps(aux);
3007 bpf_free_used_btfs(aux);
3008 bpf_prog_disassoc_struct_ops(aux->prog);
3009 if (bpf_prog_is_dev_bound(aux))
3010 bpf_prog_dev_bound_destroy(aux->prog);
3011 #ifdef CONFIG_PERF_EVENTS
3012 if (aux->prog->has_callchain_buf)
3013 put_callchain_buffers();
3014 #endif
3015 if (aux->dst_trampoline)
3016 bpf_trampoline_put(aux->dst_trampoline);
3017 for (i = 0; i < aux->real_func_cnt; i++) {
3018 /* We can just unlink the subprog poke descriptor table as
3019 * it was originally linked to the main program and is also
3020 * released along with it.
3021 */
3022 aux->func[i]->aux->poke_tab = NULL;
3023 bpf_jit_free(aux->func[i]);
3024 }
3025 if (aux->real_func_cnt) {
3026 kfree(aux->func);
3027 bpf_prog_unlock_free(aux->prog);
3028 } else {
3029 bpf_jit_free(aux->prog);
3030 }
3031 }
3032
3033 void bpf_prog_free(struct bpf_prog *fp)
3034 {
3035 struct bpf_prog_aux *aux = fp->aux;
3036
3037 if (aux->dst_prog)
3038 bpf_prog_put(aux->dst_prog);
3039 bpf_token_put(aux->token);
3040 INIT_WORK(&aux->work, bpf_prog_free_deferred);
3041 schedule_work(&aux->work);
3042 }
3043 EXPORT_SYMBOL_GPL(bpf_prog_free);
3044
3045 /* RNG for unprivileged user space with separated state from prandom_u32(). */
3046 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
3047
3048 void bpf_user_rnd_init_once(void)
3049 {
3050 prandom_init_once(&bpf_user_rnd_state);
3051 }
3052
3053 BPF_CALL_0(bpf_user_rnd_u32)
3054 {
3055 /* Should someone ever have the rather unwise idea to use some
3056 * of the registers passed into this function, then note that
3057 * this function is called from native eBPF and classic-to-eBPF
3058 * transformations. Register assignments from both sides are
3059 * different, f.e. classic always sets fn(ctx, A, X) here.
3060 */
3061 struct rnd_state *state;
3062 u32 res;
3063
3064 state = &get_cpu_var(bpf_user_rnd_state);
3065 res = prandom_u32_state(state);
3066 put_cpu_var(bpf_user_rnd_state);
3067
3068 return res;
3069 }
3070
3071 BPF_CALL_0(bpf_get_raw_cpu_id)
3072 {
3073 return raw_smp_processor_id();
3074 }
3075
3076 /* Weak definitions of helper functions in case we don't have bpf syscall. */
3077 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
3078 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
3079 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
3080 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
3081 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
3082 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
3083 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
3084 const struct bpf_func_proto bpf_spin_lock_proto __weak;
3085 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
3086 const struct bpf_func_proto bpf_jiffies64_proto __weak;
3087
3088 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
3089 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
3090 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
3091 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
3092 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
3093 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
3094 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
3095
3096 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
3097 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
3098 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
3099 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
3100 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
3101 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
3102 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
3103 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
3104 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
3105 const struct bpf_func_proto bpf_set_retval_proto __weak;
3106 const struct bpf_func_proto bpf_get_retval_proto __weak;
3107
3108 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
3109 {
3110 return NULL;
3111 }
3112
3113 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
3114 {
3115 return NULL;
3116 }
3117
3118 const struct bpf_func_proto * __weak bpf_get_perf_event_read_value_proto(void)
3119 {
3120 return NULL;
3121 }
3122
3123 u64 __weak
3124 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
3125 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
3126 {
3127 return -ENOTSUPP;
3128 }
3129 EXPORT_SYMBOL_GPL(bpf_event_output);
3130
3131 /* Always built-in helper functions. */
3132 const struct bpf_func_proto bpf_tail_call_proto = {
3133 /* func is unused for tail_call; we set it to pass the
3134 * get_helper_proto check.
3135 */
3136 .func = BPF_PTR_POISON,
3137 .gpl_only = false,
3138 .ret_type = RET_VOID,
3139 .arg1_type = ARG_PTR_TO_CTX,
3140 .arg2_type = ARG_CONST_MAP_PTR,
3141 .arg3_type = ARG_ANYTHING,
3142 };
3143
3144 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
3145 * It is encouraged to implement bpf_int_jit_compile() instead, so that
3146 * eBPF and implicitly also cBPF can get JITed!
3147 */
3148 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
3149 {
3150 return prog;
3151 }
3152
3153 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
3154 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
3155 */
3156 void __weak bpf_jit_compile(struct bpf_prog *prog)
3157 {
3158 }
3159
3160 bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
3161 {
3162 return false;
3163 }
3164
3165 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
3166 * analysis code and wants explicit zero extension inserted by verifier.
3167 * Otherwise, return FALSE.
3168 *
3169 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
3170 * you don't override this. JITs that don't want these extra insns can detect
3171 * them using insn_is_zext.
3172 */
3173 bool __weak bpf_jit_needs_zext(void)
3174 {
3175 return false;
3176 }
3177
3178 /* By default, enable the verifier's mitigations against Spectre v1 and v4 for
3179 * all archs. The value returned must not change at runtime as there is
3180 * currently no support for reloading programs that were loaded without
3181 * mitigations.
3182 */
3183 bool __weak bpf_jit_bypass_spec_v1(void)
3184 {
3185 return false;
3186 }
3187
3188 bool __weak bpf_jit_bypass_spec_v4(void)
3189 {
3190 return false;
3191 }
3192
3193 /* Return true if the JIT inlines the call to the helper corresponding to
3194 * the imm.
3195 *
3196 * The verifier will not patch the insn->imm for the call to the helper if
3197 * this returns true.
3198 */
3199 bool __weak bpf_jit_inlines_helper_call(s32 imm)
3200 {
3201 return false;
3202 }
3203
3204 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
3205 bool __weak bpf_jit_supports_subprog_tailcalls(void)
3206 {
3207 return false;
3208 }
3209
3210 bool __weak bpf_jit_supports_percpu_insn(void)
3211 {
3212 return false;
3213 }
3214
3215 bool __weak bpf_jit_supports_kfunc_call(void)
3216 {
3217 return false;
3218 }
3219
3220 bool __weak bpf_jit_supports_far_kfunc_call(void)
3221 {
3222 return false;
3223 }
3224
3225 bool __weak bpf_jit_supports_arena(void)
3226 {
3227 return false;
3228 }
3229
3230 bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3231 {
3232 return false;
3233 }
3234
3235 bool __weak bpf_jit_supports_fsession(void)
3236 {
3237 return false;
3238 }
3239
3240 u64 __weak bpf_arch_uaddress_limit(void)
3241 {
3242 #if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
3243 return TASK_SIZE;
3244 #else
3245 return 0;
3246 #endif
3247 }
3248
3249 /* Return TRUE if the JIT backend satisfies the following two conditions:
3250 * 1) JIT backend supports atomic_xchg() on pointer-sized words.
3251 * 2) Under the specific arch, the implementation of xchg() is the same
3252 * as atomic_xchg() on pointer-sized words.
3253 */
3254 bool __weak bpf_jit_supports_ptr_xchg(void)
3255 {
3256 return false;
3257 }
3258
3259 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
3260 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
3261 */
3262 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
3263 int len)
3264 {
3265 return -EFAULT;
3266 }
3267
3268 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
3269 enum bpf_text_poke_type new_t, void *old_addr,
3270 void *new_addr)
3271 {
3272 return -ENOTSUPP;
3273 }
3274
3275 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
3276 {
3277 return ERR_PTR(-ENOTSUPP);
3278 }
3279
3280 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
3281 {
3282 return -ENOTSUPP;
3283 }
3284
3285 bool __weak bpf_jit_supports_exceptions(void)
3286 {
3287 return false;
3288 }
3289
3290 bool __weak bpf_jit_supports_private_stack(void)
3291 {
3292 return false;
3293 }
3294
3295 void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3296 {
3297 }
3298
3299 bool __weak bpf_jit_supports_timed_may_goto(void)
3300 {
3301 return false;
3302 }
3303
3304 u64 __weak arch_bpf_timed_may_goto(void)
3305 {
3306 return 0;
3307 }
3308
3309 static noinline void bpf_prog_report_may_goto_violation(void)
3310 {
3311 #ifdef CONFIG_BPF_SYSCALL
3312 struct bpf_stream_stage ss;
3313 struct bpf_prog *prog;
3314
3315 prog = bpf_prog_find_from_stack();
3316 if (!prog)
3317 return;
3318 bpf_stream_stage(ss, prog, BPF_STDERR, ({
3319 bpf_stream_printk(ss, "ERROR: Timeout detected for may_goto instruction\n");
3320 bpf_stream_dump_stack(ss);
3321 }));
3322 #endif
3323 }
3324
3325 u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
3326 {
3327 u64 time = ktime_get_mono_fast_ns();
3328
3329 /* Populate the timestamp for this stack frame, and refresh count. */
3330 if (!p->timestamp) {
3331 p->timestamp = time;
3332 return BPF_MAX_TIMED_LOOPS;
3333 }
3334 /* Check if we've exhausted our time slice, and zero count. */
3335 if (unlikely(time - p->timestamp >= (NSEC_PER_SEC / 4))) {
3336 bpf_prog_report_may_goto_violation();
3337 return 0;
3338 }
3339 /* Refresh the count for the stack frame. */
3340 return BPF_MAX_TIMED_LOOPS;
3341 }
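/* Worked numbers for the check above: the time slice is NSEC_PER_SEC / 4,
 * i.e. 250ms per stack frame. Within the slice every call refreshes the
 * loop budget to BPF_MAX_TIMED_LOOPS; once the slice is exhausted the
 * budget drops to 0 and the program's may_goto path terminates.
 */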
3342
3343 /* for configs without MMU or 32-bit */
3344 __weak const struct bpf_map_ops arena_map_ops;
3345 __weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
3346 {
3347 return 0;
3348 }
3349 __weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
3350 {
3351 return 0;
3352 }
3353
3354 #ifdef CONFIG_BPF_SYSCALL
3355 static int __init bpf_global_ma_init(void)
3356 {
3357 int ret;
3358
3359 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
3360 bpf_global_ma_set = !ret;
3361 return ret;
3362 }
3363 late_initcall(bpf_global_ma_init);
3364 #endif
3365
3366 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
3367 EXPORT_SYMBOL(bpf_stats_enabled_key);
3368
3369 /* All definitions of tracepoints related to BPF. */
3370 #define CREATE_TRACE_POINTS
3371 #include <linux/bpf_trace.h>
3372
3373 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
3374 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
3375
3376 #ifdef CONFIG_BPF_SYSCALL
3377
3378 void bpf_get_linfo_file_line(struct btf *btf, const struct bpf_line_info *linfo,
3379 const char **filep, const char **linep, int *nump)
3380 {
3381 /* Get base component of the file path. */
3382 if (filep) {
3383 *filep = btf_name_by_offset(btf, linfo->file_name_off);
3384 *filep = kbasename(*filep);
3385 }
3386
3387 /* Obtain the source line and strip its leading whitespace. */
3388 if (linep) {
3389 *linep = btf_name_by_offset(btf, linfo->line_off);
3390 while (isspace(**linep))
3391 *linep += 1;
3392 }
3393
3394 if (nump)
3395 *nump = BPF_LINE_INFO_LINE_NUM(linfo->line_col);
3396 }
3397
3398 const struct bpf_line_info *bpf_find_linfo(const struct bpf_prog *prog, u32 insn_off)
3399 {
3400 const struct bpf_line_info *linfo;
3401 u32 nr_linfo;
3402 int l, r, m;
3403
3404 nr_linfo = prog->aux->nr_linfo;
3405 if (!nr_linfo || insn_off >= prog->len)
3406 return NULL;
3407
3408 linfo = prog->aux->linfo;
3409 /* Loop invariant: linfo[l].insn_off <= insn_off.
3410 * linfo[0].insn_off == 0 which always satisfies above condition.
3411 * Binary search is searching for rightmost linfo entry that satisfies
3412 * the above invariant, giving us the desired record that covers given
3413 * instruction offset.
3414 */
3415 l = 0;
3416 r = nr_linfo - 1;
3417 while (l < r) {
3418 /* (r - l + 1) / 2 means we break a tie to the right, so if:
3419 * l=1, r=2, linfo[l].insn_off <= insn_off, linfo[r].insn_off > insn_off,
3420 * then m=2, we see that linfo[m].insn_off > insn_off, and so
3421 * r becomes 1 and we exit the loop with correct l==1.
3422 * If the tie was broken to the left, m=1 would end us up in
3423 * an endless loop where l and m stay at 1 and r stays at 2.
3424 */
3425 m = l + (r - l + 1) / 2;
3426 if (linfo[m].insn_off <= insn_off)
3427 l = m;
3428 else
3429 r = m - 1;
3430 }
3431
3432 return &linfo[l];
3433 }
3434
3435 int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
3436 const char **linep, int *nump)
3437 {
3438 int idx = -1, insn_start, insn_end, len;
3439 struct bpf_line_info *linfo;
3440 void **jited_linfo;
3441 struct btf *btf;
3442 int nr_linfo;
3443
3444 btf = prog->aux->btf;
3445 linfo = prog->aux->linfo;
3446 jited_linfo = prog->aux->jited_linfo;
3447
3448 if (!btf || !linfo || !jited_linfo)
3449 return -EINVAL;
3450 len = prog->aux->func ? prog->aux->func[prog->aux->func_idx]->len : prog->len;
3451
3452 linfo = &prog->aux->linfo[prog->aux->linfo_idx];
3453 jited_linfo = &prog->aux->jited_linfo[prog->aux->linfo_idx];
3454
3455 insn_start = linfo[0].insn_off;
3456 insn_end = insn_start + len;
3457 nr_linfo = prog->aux->nr_linfo - prog->aux->linfo_idx;
3458
3459 for (int i = 0; i < nr_linfo &&
3460 linfo[i].insn_off >= insn_start && linfo[i].insn_off < insn_end; i++) {
3461 if (jited_linfo[i] >= (void *)ip)
3462 break;
3463 idx = i;
3464 }
3465
3466 if (idx == -1)
3467 return -ENOENT;
3468
3469 bpf_get_linfo_file_line(btf, &linfo[idx], filep, linep, nump);
3470 return 0;
3471 }
3472
3473 struct walk_stack_ctx {
3474 struct bpf_prog *prog;
3475 };
3476
3477 static bool find_from_stack_cb(void *cookie, u64 ip, u64 sp, u64 bp)
3478 {
3479 struct walk_stack_ctx *ctxp = cookie;
3480 struct bpf_prog *prog;
3481
3482 /*
3483 * The RCU read lock is held to safely traverse the latch tree, but we
3484 * don't need its protection when accessing the prog, since it has an
3485 * active stack frame on the current stack trace, and won't disappear.
3486 */
3487 rcu_read_lock();
3488 prog = bpf_prog_ksym_find(ip);
3489 rcu_read_unlock();
3490 if (!prog)
3491 return true;
3492 /* Make sure we return the main prog if we found a subprog */
3493 ctxp->prog = prog->aux->main_prog_aux->prog;
3494 return false;
3495 }
3496
3497 struct bpf_prog *bpf_prog_find_from_stack(void)
3498 {
3499 struct walk_stack_ctx ctx = {};
3500
3501 arch_bpf_stack_walk(find_from_stack_cb, &ctx);
3502 return ctx.prog;
3503 }
3504
3505 #endif
3506