// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <crypto/sha1.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/prandom.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/hex.h>
#include <linux/objtool.h>
#include <linux/overflow.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/nospec.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <linux/execmem.h>
#include <crypto/sha2.h>

#include <asm/barrier.h>
#include <linux/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define OFF	insn->off
#define IMM	insn->imm

struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF) {
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	} else if (k >= SKF_LL_OFF) {
		if (unlikely(!skb_mac_header_was_set(skb)))
			return NULL;
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	}
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
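/* Usage sketch (illustrative only, not a call site from this file): the
 * cBPF-era negative offsets select one of the special header ranges,
 * e.g.:
 *
 *	// two bytes starting at the network header:
 *	u16 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF, 2);
 *	// six bytes starting at the link-layer (MAC) header:
 *	u8 *mac = bpf_internal_load_pointer_neg_helper(skb, SKF_LL_OFF, 6);
 *
 * Both calls return NULL when the requested bytes are not inside the
 * linear skb area, so callers must check the result before dereferencing.
 */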

/* tell BPF programs that include vmlinux.h the kernel's PAGE_SIZE */
enum page_size_enum {
	__PAGE_SIZE = PAGE_SIZE
};

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, __PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL)
		return NULL;

	aux = kzalloc_obj(*aux, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}
	fp->active = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 4,
					bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (!fp->active) {
		vfree(fp);
		kfree(aux);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->main_prog_aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();
	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
#ifdef CONFIG_CGROUP_BPF
	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
#endif

	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
#ifdef CONFIG_FINEIBT
	INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
#endif
	mutex_init(&fp->aux->used_maps_mutex);
	mutex_init(&fp->aux->ext_mutex);
	mutex_init(&fp->aux->dst_mutex);
	mutex_init(&fp->aux->st_ops_assoc_mutex);

#ifdef CONFIG_BPF_SYSCALL
	bpf_prog_stream_init(fp);
#endif

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->stats) {
		free_percpu(prog->active);
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kvzalloc_objs(*prog->aux->jited_linfo,
					       prog->aux->nr_linfo,
					       bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo &&
	    (!prog->jited || !prog->aux->jited_linfo[0])) {
		kvfree(prog->aux->jited_linfo);
		prog->aux->jited_linfo = NULL;
	}

	kfree(prog->aux->kfunc_tab);
	prog->aux->kfunc_tab = NULL;
}

/* The JIT engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The index into this array is the insn_off. Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *	The first bpf insn off of the prog. The insn off
 *	here is relative to the main prog.
 *	e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *	The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
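/* Worked example (hypothetical numbers): take a subprog with
 * insn_start = 10, linfo_idx = 3, and linfo[3..5].insn_off = {10, 12, 15}.
 * The loop below then fills in:
 *
 *	jited_linfo[3] = prog->bpf_func
 *	jited_linfo[4] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *	jited_linfo[5] = prog->bpf_func + insn_to_jit_off[15 - 10 - 1]
 *
 * i.e. each line-info entry maps to the first jited byte of its xlated
 * insn, which is the byte right after the end of the preceding insn.
 */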
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *fp;
	u32 pages;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	fp = __vmalloc(size, gfp_flags);
	if (fp) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		fp_old->stats = NULL;
		fp_old->active = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		mutex_destroy(&fp->aux->used_maps_mutex);
		mutex_destroy(&fp->aux->dst_mutex);
		mutex_destroy(&fp->aux->st_ops_assoc_mutex);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	free_percpu(fp->stats);
	free_percpu(fp->active);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	size_t size = bpf_prog_insn_size(fp);
	struct bpf_insn *dst;
	bool was_ld_map;
	u32 i;

	dst = vmalloc(size);
	if (!dst)
		return -ENOMEM;

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from the user space side.
	 */
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}
	sha256((u8 *)dst, size, fp->digest);
	vfree(dst);
	return 0;
}
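/* Example of the canonicalization above (hypothetical dump): a map load
 * is a two-insn ld_imm64 pair such as
 *
 *	insn[i]   = BPF_LD | BPF_IMM | BPF_DW, src_reg = BPF_PSEUDO_MAP_FD,
 *	            imm = 5   (the map fd, which changes from run to run)
 *	insn[i+1] = code 0, imm = <upper 32 bits>
 *
 * and is hashed as if both imm fields were 0, so two loads of the same
 * program with different fd numbers still produce the same digest.
 */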

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	s64 off_min, off_max, off;
	s32 delta = end_new - end_old;

	if (insn->code == (BPF_JMP32 | BPF_JA)) {
		off = insn->imm;
		off_min = S32_MIN;
		off_max = S32_MAX;
	} else {
		off = insn->off;
		off_min = S16_MIN;
		off_max = S16_MAX;
	}

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass) {
		if (insn->code == (BPF_JMP32 | BPF_JA))
			insn->imm = off;
		else
			insn->off = off;
	}
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		if (bpf_pseudo_func(insn)) {
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
			if (ret)
				return ret;
			continue;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed to reverse to the original state. An
	 * overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}
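/* Usage sketch (hypothetical rewrite, not a call site from this file):
 * replacing the single insn at `off` with a two-insn patchlet, in the
 * style of the verifier's fixup passes:
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_IMM(BPF_REG_AX, 0),
 *		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_AX),
 *	};
 *	prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *
 * Note the returned program may be a reallocated copy; on success the
 * caller must continue with the return value, not the old pointer.
 */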

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	int err;

	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	err = bpf_adj_branches(prog, off, off + cnt, off, false);
	WARN_ON_ONCE(err);
	return err;
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->real_func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;
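/* These back the net.core.* sysctls, e.g. (illustrative):
 *
 *	# echo 1 > /proc/sys/net/core/bpf_jit_enable	# enable the JIT
 *	# echo 2 > /proc/sys/net/core/bpf_jit_harden	# blind all progs
 *	# echo 1 > /proc/sys/net/core/bpf_jit_kallsyms	# expose ksyms
 *
 * bpf_jit_limit is derived at boot (see bpf_jit_charge_init() below),
 * and writes to it via sysctl are bounded by bpf_jit_limit_max.
 */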

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
	char *sym = prog->aux->ksym.name;
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;

	ksym = container_of(n, struct bpf_ksym, tnode);

	if (val < ksym->start)
		return -1;
	/* Ensure that we detect return addresses as part of the program, when
	 * the final instruction is a call for a program part of the stack
	 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
	 */
	if (val > ksym->end)
		return  1;

	return 0;
}
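/* Example of why the upper bound is inclusive (hypothetical layout): for
 * a prog spanning [start, end) whose final jited insn is a call, the
 * pushed return address equals `end`. A stack unwinder looking up that
 * address must still attribute it to this prog, hence val == ksym->end
 * matches (returns 0) instead of falling into the `> end` case.
 */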

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	WARN_ON_ONCE(!list_empty(&ksym->lnode));
	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
	if (list_empty(&ksym->lnode))
		return;

	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	__bpf_ksym_del(ksym);
	spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !bpf_token_capable(fp->aux->token, CAP_BPF))
		return;

	bpf_prog_ksym_set_addr(fp);
	bpf_prog_ksym_set_name(fp);
	fp->aux->ksym.prog = true;

	bpf_ksym_add(&fp->aux->ksym);

#ifdef CONFIG_FINEIBT
	/*
	 * With FineIBT, code in the __cfi_foo() symbols can get executed
	 * and hence the unwinder needs help.
	 */
	if (cfi_mode != CFI_FINEIBT)
		return;

	snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
		 "__cfi_%s", fp->aux->ksym.name);

	fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
	fp->aux->ksym_prefix.end   = (unsigned long) fp->bpf_func;

	bpf_ksym_add(&fp->aux->ksym_prefix);
#endif
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	bpf_ksym_del(&fp->aux->ksym);
#ifdef CONFIG_FINEIBT
	if (cfi_mode != CFI_FINEIBT)
		return;
	bpf_ksym_del(&fp->aux->ksym_prefix);
#endif
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

int bpf_address_lookup(unsigned long addr, unsigned long *size,
		       unsigned long *off, char *sym)
{
	struct bpf_ksym *ksym;
	int ret = 0;

	rcu_read_lock();
	ksym = bpf_ksym_find(addr);
	if (ksym) {
		unsigned long symbol_start = ksym->start;
		unsigned long symbol_end = ksym->end;

		ret = strscpy(sym, ksym->name, KSYM_NAME_LEN);

		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_ksym_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	struct bpf_ksym *ksym;

	WARN_ON_ONCE(!rcu_read_lock_held());
	ksym = bpf_ksym_find(addr);

	return ksym && ksym->prog ?
	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
	       NULL;
}

bool bpf_has_frame_pointer(unsigned long ip)
{
	struct bpf_ksym *ksym;
	unsigned long offset;

	guard(rcu)();

	ksym = bpf_ksym_find(ip);
	if (!ksym || !ksym->fp_start || !ksym->fp_end)
		return false;

	offset = ip - ksym->start;

	return offset >= ksym->fp_start && offset < ksym->fp_end;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_ksym_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_ksym *ksym;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
		if (it++ != symnum)
			continue;

		strscpy(sym, ksym->name, KSYM_NAME_LEN);

		*value = ksym->start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc_array(tab, size, sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small BPF programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB
 * on x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT	6
#define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))

struct bpf_prog_pack {
	struct list_head list;
	void *ptr;
	unsigned long bitmap[];
};

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{
	memset(area, 0, size);
}

#define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
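/* Example of the chunk arithmetic (hypothetical sizes): a 150-byte image
 * occupies round_up(150, 64) / 64 = 3 chunks, i.e. 192 bytes of a pack:
 *
 *	BPF_PROG_SIZE_TO_NBITS(150) == 3
 *
 * and BPF_PROG_CHUNK_MASK rounds an address inside the image back down
 * to its 64-byte chunk start, which is how bpf_jit_binary_pack_hdr()
 * below recovers the header from fp->bpf_func.
 */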

static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
 */
#ifdef PMD_SIZE
/* PMD_SIZE is really big for some archs. It doesn't make sense to
 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
 * greater than or equal to 2MB.
 */
#define BPF_PROG_PACK_SIZE	(SZ_2M * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE	PAGE_SIZE
#endif

#define BPF_PROG_CHUNK_COUNT	(BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)

static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_prog_pack *pack;
	int err;

	pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT));
	if (!pack)
		return NULL;
	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
	if (!pack->ptr)
		goto out;
	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);

	set_vm_flush_reset_perms(pack->ptr);
	err = set_memory_rox((unsigned long)pack->ptr,
			     BPF_PROG_PACK_SIZE / PAGE_SIZE);
	if (err)
		goto out;
	list_add_tail(&pack->list, &pack_list);
	return pack;

out:
	bpf_jit_free_exec(pack->ptr);
	kfree(pack);
	return NULL;
}

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
	struct bpf_prog_pack *pack;
	unsigned long pos;
	void *ptr = NULL;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		size = round_up(size, PAGE_SIZE);
		ptr = bpf_jit_alloc_exec(size);
		if (ptr) {
			int err;

			bpf_fill_ill_insns(ptr, size);
			set_vm_flush_reset_perms(ptr);
			err = set_memory_rox((unsigned long)ptr,
					     size / PAGE_SIZE);
			if (err) {
				bpf_jit_free_exec(ptr);
				ptr = NULL;
			}
		}
		goto out;
	}
	list_for_each_entry(pack, &pack_list, list) {
		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
						 nbits, 0);
		if (pos < BPF_PROG_CHUNK_COUNT)
			goto found_free_area;
	}

	pack = alloc_new_pack(bpf_fill_ill_insns);
	if (!pack)
		goto out;

	pos = 0;

found_free_area:
	bitmap_set(pack->bitmap, pos, nbits);
	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);

out:
	mutex_unlock(&pack_mutex);
	return ptr;
}

void bpf_prog_pack_free(void *ptr, u32 size)
{
	struct bpf_prog_pack *pack = NULL, *tmp;
	unsigned int nbits;
	unsigned long pos;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		bpf_jit_free_exec(ptr);
		goto out;
	}

	list_for_each_entry(tmp, &pack_list, list) {
		if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
			pack = tmp;
			break;
		}
	}

	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
		goto out;

	nbits = BPF_PROG_SIZE_TO_NBITS(size);
	pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;

	WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
		  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");

	bitmap_clear(pack->bitmap, pos, nbits);
	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
		list_del(&pack->list);
		bpf_jit_free_exec(pack->ptr);
		kfree(pack);
	}
out:
	mutex_unlock(&pack_mutex);
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);

int bpf_jit_charge_modmem(u32 size)
{
	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
		if (!bpf_capable()) {
			atomic_long_sub(size, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

void bpf_jit_uncharge_modmem(u32 size)
{
	atomic_long_sub(size, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return execmem_alloc(EXECMEM_BPF, size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	execmem_free(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* Most BPF filters are really small, but if some of them
	 * fill a whole page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);

	if (bpf_jit_charge_modmem(size))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->size = size;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = get_random_u32_below(hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 size = hdr->size;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(size);
}

/* Allocate jit binary from bpf_prog_pack allocator.
 * Since the allocated memory is RO+X, the JIT engine cannot write directly
 * to the memory. To solve this problem, an RW buffer is also allocated at
 * the same time. The JIT engine should calculate offsets based on the
 * RO memory address, but write the JITed program to the RW buffer. Once the
 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
 * the JITed program to the RO memory.
 */
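/* JIT-side flow sketch (simplified; error handling mostly elided):
 *
 *	header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					   &rw_header, &rw_image, fill);
 *	// emit instructions into rw_image, computing branch targets
 *	// and relocations against the final `image` address
 *	if (bpf_jit_binary_pack_finalize(header, rw_header))
 *		goto out_free;	// rw_header is already freed on error
 *
 * After finalize succeeds, rw_header is gone and the program runs from
 * the RO+X `image` inside the shared pack.
 */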
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_header,
			  u8 **rw_image,
			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *ro_header;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* add 16 bytes for a random section of illegal instructions */
	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);

	if (bpf_jit_charge_modmem(size))
		return NULL;
	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
	if (!ro_header) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	*rw_header = kvmalloc(size, GFP_KERNEL);
	if (!*rw_header) {
		bpf_prog_pack_free(ro_header, size);
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(*rw_header, size);
	(*rw_header)->size = size;

	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
	start = get_random_u32_below(hole) & ~(alignment - 1);

	*image_ptr = &ro_header->image[start];
	*rw_image = &(*rw_header)->image[start];

	return ro_header;
}

/* Copy JITed text from rw_header to its final location, the ro_header. */
int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header)
{
	void *ptr;

	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);

	kvfree(rw_header);

	if (IS_ERR(ptr)) {
		bpf_prog_pack_free(ro_header, ro_header->size);
		return PTR_ERR(ptr);
	}
	return 0;
}

/* bpf_jit_binary_pack_free is called in two different scenarios:
 *   1) when the program is freed (after bpf_jit_binary_pack_finalize);
 *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
 *   For case 2), we need to free both the RO memory and the RW buffer.
 *
 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
 * bpf_arch_text_copy (when jit fails).
 */
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header)
{
	u32 size = ro_header->size;

	bpf_prog_pack_free(ro_header, size);
	kvfree(rw_header);
	bpf_jit_uncharge_modmem(size);
}

struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr;

	addr = real_start & BPF_PROG_CHUNK_MASK;
	return (void *)addr;
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr;

	addr = real_start & PAGE_MASK;
	return (void *)addr;
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;
	int err;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->real_func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
		   bpf_jit_supports_far_kfunc_call()) {
		err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
		if (err)
			return err;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

const char *bpf_jit_get_prog_name(struct bpf_prog *prog)
{
	if (prog->aux->ksym.prog)
		return prog->aux->ksym.name;
	return prog->aux->name;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_u32();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
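	/* Worked example (hypothetical values): blinding
	 * BPF_ALU64_IMM(BPF_ADD, R1, 0x1234) with imm_rnd = 0xdeadbeef
	 * emits the three-insn sequence
	 *
	 *	AX = 0xdeadbeef ^ 0x1234	// random-looking constant
	 *	AX ^= 0xdeadbeef		// AX == 0x1234 again
	 *	R1 += AX			// original semantics
	 *
	 * so the attacker-chosen immediate never appears literally in the
	 * jited image.
	 */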
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;

	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
	case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
	case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^
				      from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		/*
		 * Cannot use BPF_STX_MEM() macro here as it
		 * hardcodes BPF_MEM mode, losing PROBE_MEM32
		 * and breaking arena addressing in the JIT.
		 */
		*to++ = (struct bpf_insn) {
			.code = BPF_STX | BPF_PROBE_MEM32 |
				BPF_SIZE(from->code),
			.dst_reg = from->dst_reg,
			.src_reg = BPF_REG_AX,
			.off = from->off,
		};
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	fp->stats = NULL;
	fp->active = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

static void adjust_insn_arrays(struct bpf_prog *prog, u32 off, u32 len)
{
#ifdef CONFIG_BPF_SYSCALL
	struct bpf_map *map;
	int i;

	if (len <= 1)
		return;

	for (i = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY)
			bpf_insn_array_adjust(map, off, len);
	}
#endif
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!prog->blinding_requested || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			/* ld_imm64 with an address of bpf subprog is not
			 * a user controlled constant. Don't randomize it,
			 * since it will conflict with jit_subprogs() logic.
			 */
			insn++;
			i++;
			continue;
		}

		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
					       clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Instruction arrays must be updated using absolute xlated offsets */
		adjust_insn_arrays(clone, prog->aux->subprog_start + i, rewritten);

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/* Register based. */			\
	INSN_3(ALU, ADD,  X),			\
	INSN_3(ALU, SUB,  X),			\
	INSN_3(ALU, AND,  X),			\
	INSN_3(ALU, OR,   X),			\
	INSN_3(ALU, LSH,  X),			\
	INSN_3(ALU, RSH,  X),			\
	INSN_3(ALU, XOR,  X),			\
	INSN_3(ALU, MUL,  X),			\
	INSN_3(ALU, MOV,  X),			\
	INSN_3(ALU, ARSH, X),			\
	INSN_3(ALU, DIV,  X),			\
	INSN_3(ALU, MOD,  X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/* Immediate based. */			\
	INSN_3(ALU, ADD,  K),			\
	INSN_3(ALU, SUB,  K),			\
	INSN_3(ALU, AND,  K),			\
	INSN_3(ALU, OR,   K),			\
	INSN_3(ALU, LSH,  K),			\
	INSN_3(ALU, RSH,  K),			\
	INSN_3(ALU, XOR,  K),			\
	INSN_3(ALU, MUL,  K),			\
	INSN_3(ALU, MOV,  K),			\
	INSN_3(ALU, ARSH, K),			\
	INSN_3(ALU, DIV,  K),			\
	INSN_3(ALU, MOD,  K),			\
	/* 64 bit ALU operations. */		\
	/* Register based. */			\
	INSN_3(ALU64, ADD,  X),			\
	INSN_3(ALU64, SUB,  X),			\
	INSN_3(ALU64, AND,  X),			\
	INSN_3(ALU64, OR,   X),			\
	INSN_3(ALU64, LSH,  X),			\
	INSN_3(ALU64, RSH,  X),			\
	INSN_3(ALU64, XOR,  X),			\
	INSN_3(ALU64, MUL,  X),			\
	INSN_3(ALU64, MOV,  X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV,  X),			\
	INSN_3(ALU64, MOD,  X),			\
	INSN_2(ALU64, NEG),			\
	INSN_3(ALU64, END, TO_LE),		\
	/* Immediate based. */			\
	INSN_3(ALU64, ADD,  K),			\
	INSN_3(ALU64, SUB,  K),			\
	INSN_3(ALU64, AND,  K),			\
	INSN_3(ALU64, OR,   K),			\
	INSN_3(ALU64, LSH,  K),			\
	INSN_3(ALU64, RSH,  K),			\
	INSN_3(ALU64, XOR,  K),			\
	INSN_3(ALU64, MUL,  K),			\
	INSN_3(ALU64, MOV,  K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV,  K),			\
	INSN_3(ALU64, MOD,  K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* 32-bit Jump instructions. */		\
	/* Register based. */			\
	INSN_3(JMP32, JEQ,  X),			\
	INSN_3(JMP32, JNE,  X),			\
	INSN_3(JMP32, JGT,  X),			\
	INSN_3(JMP32, JLT,  X),			\
	INSN_3(JMP32, JGE,  X),			\
	INSN_3(JMP32, JLE,  X),			\
	INSN_3(JMP32, JSGT, X),			\
	INSN_3(JMP32, JSLT, X),			\
	INSN_3(JMP32, JSGE, X),			\
	INSN_3(JMP32, JSLE, X),			\
	INSN_3(JMP32, JSET, X),			\
	/* Immediate based. */			\
	INSN_3(JMP32, JEQ,  K),			\
	INSN_3(JMP32, JNE,  K),			\
	INSN_3(JMP32, JGT,  K),			\
	INSN_3(JMP32, JLT,  K),			\
	INSN_3(JMP32, JGE,  K),			\
	INSN_3(JMP32, JLE,  K),			\
	INSN_3(JMP32, JSGT, K),			\
	INSN_3(JMP32, JSLT, K),			\
	INSN_3(JMP32, JSGE, K),			\
	INSN_3(JMP32, JSLE, K),			\
	INSN_3(JMP32, JSET, K),			\
	/* Jump instructions. */		\
	/* Register based. */			\
	INSN_3(JMP, JEQ,  X),			\
	INSN_3(JMP, JNE,  X),			\
	INSN_3(JMP, JGT,  X),			\
	INSN_3(JMP, JLT,  X),			\
	INSN_3(JMP, JGE,  X),			\
	INSN_3(JMP, JLE,  X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/* Immediate based. */			\
	INSN_3(JMP, JEQ,  K),			\
	INSN_3(JMP, JNE,  K),			\
	INSN_3(JMP, JGT,  K),			\
	INSN_3(JMP, JLT,  K),			\
	INSN_3(JMP, JGE,  K),			\
	INSN_3(JMP, JLE,  K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	INSN_2(JMP32, JA),			\
	/* Atomic operations. */		\
	INSN_3(STX, ATOMIC, B),			\
	INSN_3(STX, ATOMIC, H),			\
	INSN_3(STX, ATOMIC, W),			\
	INSN_3(STX, ATOMIC, DW),		\
	/* Store instructions. */		\
	/* Register based. */			\
	INSN_3(STX, MEM, B),			\
	INSN_3(STX, MEM, H),			\
	INSN_3(STX, MEM, W),			\
	INSN_3(STX, MEM, DW),			\
	/* Immediate based. */			\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/* Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	INSN_3(LDX, MEMSX, B),			\
	INSN_3(LDX, MEMSX, H),			\
	INSN_3(LDX, MEMSX, W),			\
	/* Immediate based. */			\
	INSN_3(LD, IMM, DW)
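/* Encoding example: each INSN_3(CLASS, OP, SRC) entry expands to the
 * opcode byte BPF_CLASS | BPF_OP | BPF_SRC, e.g. INSN_3(ALU64, ADD, X)
 * is 0x07 | 0x00 | 0x08 == 0x0f, the 64-bit add-register instruction.
 */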

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
		[BPF_JMP | BPF_JA | BPF_X] = true,
		[BPF_JMP | BPF_JCOND] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/* Absolute value of s32 without undefined behavior for S32_MIN */
static u32 abs_s32(s32 x)
{
	return x >= 0 ? (u32)x : -(u32)x;
}
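/* e.g. abs_s32(S32_MIN) yields 0x80000000 as a u32; the naive -x on a
 * plain s32 would be signed-overflow UB for this one input.
 */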
1765
1766 /**
1767 * ___bpf_prog_run - run eBPF program on a given context
1768 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1769 * @insn: is the array of eBPF instructions
1770 *
1771 * Decode and execute eBPF instructions.
1772 *
1773 * Return: whatever value is in %BPF_R0 at program exit
1774 */
___bpf_prog_run(u64 * regs,const struct bpf_insn * insn)1775 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1776 {
1777 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1778 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1779 static const void * const jumptable[256] __annotate_jump_table = {
1780 [0 ... 255] = &&default_label,
1781 /* Now overwrite non-defaults ... */
1782 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1783 /* Non-UAPI available opcodes. */
1784 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1785 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1786 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1787 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1788 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1789 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1790 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1791 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1792 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1793 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1794 };
1795 #undef BPF_INSN_3_LBL
1796 #undef BPF_INSN_2_LBL
1797 u32 tail_call_cnt = 0;
1798
1799 #define CONT ({ insn++; goto select_insn; })
1800 #define CONT_JMP ({ insn++; goto select_insn; })
1801
1802 select_insn:
1803 goto *jumptable[insn->code];
1804
1805 /* Explicitly mask the register-based shift amounts with 63 or 31
1806 * to avoid undefined behavior. Normally this won't affect the
1807 * generated code, for example, in case of native 64 bit archs such
1808 * as x86-64 or arm64, the compiler is optimizing the AND away for
1809 * the interpreter. In case of JITs, each of the JIT backends compiles
1810 * the BPF shift operations to machine instructions which produce
1811 * implementation-defined results in such a case; the resulting
1812 * contents of the register may be arbitrary, but program behaviour
1813 * as a whole remains defined. In other words, in case of JIT backends,
1814 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1815 */
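	/* For example, a 64-bit LSH with a shift amount of 70 in SRC
	 * executes DST <<= (70 & 63), i.e. a shift by 6.
	 */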
1816 /* ALU (shifts) */
1817 #define SHT(OPCODE, OP) \
1818 ALU64_##OPCODE##_X: \
1819 DST = DST OP (SRC & 63); \
1820 CONT; \
1821 ALU_##OPCODE##_X: \
1822 DST = (u32) DST OP ((u32) SRC & 31); \
1823 CONT; \
1824 ALU64_##OPCODE##_K: \
1825 DST = DST OP IMM; \
1826 CONT; \
1827 ALU_##OPCODE##_K: \
1828 DST = (u32) DST OP (u32) IMM; \
1829 CONT;
1830 /* ALU (rest) */
1831 #define ALU(OPCODE, OP) \
1832 ALU64_##OPCODE##_X: \
1833 DST = DST OP SRC; \
1834 CONT; \
1835 ALU_##OPCODE##_X: \
1836 DST = (u32) DST OP (u32) SRC; \
1837 CONT; \
1838 ALU64_##OPCODE##_K: \
1839 DST = DST OP IMM; \
1840 CONT; \
1841 ALU_##OPCODE##_K: \
1842 DST = (u32) DST OP (u32) IMM; \
1843 CONT;
1844 ALU(ADD, +)
1845 ALU(SUB, -)
1846 ALU(AND, &)
1847 ALU(OR, |)
1848 ALU(XOR, ^)
1849 ALU(MUL, *)
1850 SHT(LSH, <<)
1851 SHT(RSH, >>)
1852 #undef SHT
1853 #undef ALU
1854 ALU_NEG:
1855 DST = (u32) -DST;
1856 CONT;
1857 ALU64_NEG:
1858 DST = -DST;
1859 CONT;
1860 ALU_MOV_X:
1861 switch (OFF) {
1862 case 0:
1863 DST = (u32) SRC;
1864 break;
1865 case 8:
1866 DST = (u32)(s8) SRC;
1867 break;
1868 case 16:
1869 DST = (u32)(s16) SRC;
1870 break;
1871 }
1872 CONT;
1873 ALU_MOV_K:
1874 DST = (u32) IMM;
1875 CONT;
1876 ALU64_MOV_X:
1877 switch (OFF) {
1878 case 0:
1879 DST = SRC;
1880 break;
1881 case 8:
1882 DST = (s8) SRC;
1883 break;
1884 case 16:
1885 DST = (s16) SRC;
1886 break;
1887 case 32:
1888 DST = (s32) SRC;
1889 break;
1890 }
1891 CONT;
1892 ALU64_MOV_K:
1893 DST = IMM;
1894 CONT;
1895 LD_IMM_DW:
1896 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1897 insn++;
1898 CONT;
1899 ALU_ARSH_X:
1900 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1901 CONT;
1902 ALU_ARSH_K:
1903 DST = (u64) (u32) (((s32) DST) >> IMM);
1904 CONT;
1905 ALU64_ARSH_X:
1906 (*(s64 *) &DST) >>= (SRC & 63);
1907 CONT;
1908 ALU64_ARSH_K:
1909 (*(s64 *) &DST) >>= IMM;
1910 CONT;
1911 ALU64_MOD_X:
1912 switch (OFF) {
1913 case 0:
1914 div64_u64_rem(DST, SRC, &AX);
1915 DST = AX;
1916 break;
1917 case 1:
1918 AX = div64_s64(DST, SRC);
1919 DST = DST - AX * SRC;
1920 break;
1921 }
1922 CONT;
1923 ALU_MOD_X:
1924 switch (OFF) {
1925 case 0:
1926 AX = (u32) DST;
1927 DST = do_div(AX, (u32) SRC);
1928 break;
1929 case 1:
1930 AX = abs_s32((s32)DST);
1931 AX = do_div(AX, abs_s32((s32)SRC));
1932 if ((s32)DST < 0)
1933 DST = (u32)-AX;
1934 else
1935 DST = (u32)AX;
1936 break;
1937 }
1938 CONT;
1939 ALU64_MOD_K:
1940 switch (OFF) {
1941 case 0:
1942 div64_u64_rem(DST, IMM, &AX);
1943 DST = AX;
1944 break;
1945 case 1:
1946 AX = div64_s64(DST, IMM);
1947 DST = DST - AX * IMM;
1948 break;
1949 }
1950 CONT;
1951 ALU_MOD_K:
1952 switch (OFF) {
1953 case 0:
1954 AX = (u32) DST;
1955 DST = do_div(AX, (u32) IMM);
1956 break;
1957 case 1:
1958 AX = abs_s32((s32)DST);
1959 AX = do_div(AX, abs_s32((s32)IMM));
1960 if ((s32)DST < 0)
1961 DST = (u32)-AX;
1962 else
1963 DST = (u32)AX;
1964 break;
1965 }
1966 CONT;
1967 ALU64_DIV_X:
1968 switch (OFF) {
1969 case 0:
1970 DST = div64_u64(DST, SRC);
1971 break;
1972 case 1:
1973 DST = div64_s64(DST, SRC);
1974 break;
1975 }
1976 CONT;
1977 ALU_DIV_X:
1978 switch (OFF) {
1979 case 0:
1980 AX = (u32) DST;
1981 do_div(AX, (u32) SRC);
1982 DST = (u32) AX;
1983 break;
1984 case 1:
1985 AX = abs_s32((s32)DST);
1986 do_div(AX, abs_s32((s32)SRC));
1987 if (((s32)DST < 0) == ((s32)SRC < 0))
1988 DST = (u32)AX;
1989 else
1990 DST = (u32)-AX;
1991 break;
1992 }
1993 CONT;
1994 ALU64_DIV_K:
1995 switch (OFF) {
1996 case 0:
1997 DST = div64_u64(DST, IMM);
1998 break;
1999 case 1:
2000 DST = div64_s64(DST, IMM);
2001 break;
2002 }
2003 CONT;
2004 ALU_DIV_K:
2005 switch (OFF) {
2006 case 0:
2007 AX = (u32) DST;
2008 do_div(AX, (u32) IMM);
2009 DST = (u32) AX;
2010 break;
2011 case 1:
2012 AX = abs_s32((s32)DST);
2013 do_div(AX, abs_s32((s32)IMM));
2014 if (((s32)DST < 0) == ((s32)IMM < 0))
2015 DST = (u32)AX;
2016 else
2017 DST = (u32)-AX;
2018 break;
2019 }
2020 CONT;
2021 ALU_END_TO_BE:
2022 switch (IMM) {
2023 case 16:
2024 DST = (__force u16) cpu_to_be16(DST);
2025 break;
2026 case 32:
2027 DST = (__force u32) cpu_to_be32(DST);
2028 break;
2029 case 64:
2030 DST = (__force u64) cpu_to_be64(DST);
2031 break;
2032 }
2033 CONT;
2034 ALU_END_TO_LE:
2035 switch (IMM) {
2036 case 16:
2037 DST = (__force u16) cpu_to_le16(DST);
2038 break;
2039 case 32:
2040 DST = (__force u32) cpu_to_le32(DST);
2041 break;
2042 case 64:
2043 DST = (__force u64) cpu_to_le64(DST);
2044 break;
2045 }
2046 CONT;
2047 ALU64_END_TO_LE:
2048 switch (IMM) {
2049 case 16:
2050 DST = (__force u16) __swab16(DST);
2051 break;
2052 case 32:
2053 DST = (__force u32) __swab32(DST);
2054 break;
2055 case 64:
2056 DST = (__force u64) __swab64(DST);
2057 break;
2058 }
2059 CONT;
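	/* Example: on a little-endian host, BPF_TO_BE with IMM == 16 turns
	 * DST == 0x1234 into 0x3412, BPF_TO_LE with the same IMM merely
	 * zero-extends the low 16 bits, and the BPF_ALU64 variant above
	 * byte-swaps unconditionally on any host.
	 */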
2060
2061 /* CALL */
2062 JMP_CALL:
2063 /* Function call scratches BPF_R1-BPF_R5 registers,
2064 * preserves BPF_R6-BPF_R9, and stores return value
2065 * into BPF_R0.
2066 */
2067 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
2068 BPF_R4, BPF_R5);
2069 CONT;
2070
2071 JMP_CALL_ARGS:
2072 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
2073 BPF_R3, BPF_R4,
2074 BPF_R5,
2075 insn + insn->off + 1);
2076 CONT;
2077
2078 JMP_TAIL_CALL: {
2079 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
2080 struct bpf_array *array = container_of(map, struct bpf_array, map);
2081 struct bpf_prog *prog;
2082 u32 index = BPF_R3;
2083
2084 if (unlikely(index >= array->map.max_entries))
2085 goto out;
2086
2087 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
2088 goto out;
2089
2090 tail_call_cnt++;
2091
2092 prog = READ_ONCE(array->ptrs[index]);
2093 if (!prog)
2094 goto out;
2095
2096 /* ARG1 at this point is guaranteed to point to CTX from
2097 * the verifier side due to the fact that the tail call is
2098 * handled like a helper, that is, bpf_tail_call_proto,
2099 * where arg1_type is ARG_PTR_TO_CTX.
2100 */
2101 insn = prog->insnsi;
2102 goto select_insn;
2103 out:
2104 CONT;
2105 }
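	/* Note: a tail call reuses the current frame: registers and stack
	 * carry over into the callee, and the local tail_call_cnt persists
	 * so one chain cannot exceed MAX_TAIL_CALL_CNT dispatches.
	 */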
2106 JMP_JA:
2107 insn += insn->off;
2108 CONT;
2109 JMP32_JA:
2110 insn += insn->imm;
2111 CONT;
2112 JMP_EXIT:
2113 return BPF_R0;
2114 /* JMP */
2115 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
2116 JMP_##OPCODE##_X: \
2117 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
2118 insn += insn->off; \
2119 CONT_JMP; \
2120 } \
2121 CONT; \
2122 JMP32_##OPCODE##_X: \
2123 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
2124 insn += insn->off; \
2125 CONT_JMP; \
2126 } \
2127 CONT; \
2128 JMP_##OPCODE##_K: \
2129 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
2130 insn += insn->off; \
2131 CONT_JMP; \
2132 } \
2133 CONT; \
2134 JMP32_##OPCODE##_K: \
2135 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
2136 insn += insn->off; \
2137 CONT_JMP; \
2138 } \
2139 CONT;
2140 COND_JMP(u, JEQ, ==)
2141 COND_JMP(u, JNE, !=)
2142 COND_JMP(u, JGT, >)
2143 COND_JMP(u, JLT, <)
2144 COND_JMP(u, JGE, >=)
2145 COND_JMP(u, JLE, <=)
2146 COND_JMP(u, JSET, &)
2147 COND_JMP(s, JSGT, >)
2148 COND_JMP(s, JSLT, <)
2149 COND_JMP(s, JSGE, >=)
2150 COND_JMP(s, JSLE, <=)
2151 #undef COND_JMP
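	/* Each COND_JMP(SIGN, OPCODE, CMP_OP) above expands into four
	 * handlers, e.g. COND_JMP(u, JEQ, ==) emits JMP_JEQ_X, JMP32_JEQ_X,
	 * JMP_JEQ_K and JMP32_JEQ_K.
	 */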
2152 /* ST, STX and LDX */
2153 ST_NOSPEC:
2154 /* Speculation barrier for mitigating Speculative Store Bypass,
2155 * Bounds-Check Bypass and Type Confusion. In case of arm64, we
2156 * rely on the firmware mitigation as controlled via the ssbd
2157 * kernel parameter. Whenever the mitigation is enabled, it
2158 * works for all of the kernel code with no need to provide any
2159 * additional instructions here. In case of x86, we use 'lfence'
2160 * insn for mitigation. We reuse preexisting logic from Spectre
2161 * v1 mitigation that happens to produce the required code on
2162 * x86 for v4 as well.
2163 */
2164 barrier_nospec();
2165 CONT;
2166 #define LDST(SIZEOP, SIZE) \
2167 STX_MEM_##SIZEOP: \
2168 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
2169 CONT; \
2170 ST_MEM_##SIZEOP: \
2171 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
2172 CONT; \
2173 LDX_MEM_##SIZEOP: \
2174 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2175 CONT; \
2176 LDX_PROBE_MEM_##SIZEOP: \
2177 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2178 (const void *)(long) (SRC + insn->off)); \
2179 DST = *((SIZE *)&DST); \
2180 CONT;
2181
2182 LDST(B, u8)
2183 LDST(H, u16)
2184 LDST(W, u32)
2185 LDST(DW, u64)
2186 #undef LDST
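	/* The LDX_PROBE_MEM_* handlers back loads that the verifier knows
	 * may fault (e.g. untrusted kernel pointers in tracing programs):
	 * bpf_probe_read_kernel_common() copies safely and clears the
	 * destination on fault instead of oopsing.
	 */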
2187
2188 #define LDSX(SIZEOP, SIZE) \
2189 LDX_MEMSX_##SIZEOP: \
2190 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2191 CONT; \
2192 LDX_PROBE_MEMSX_##SIZEOP: \
2193 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2194 (const void *)(long) (SRC + insn->off)); \
2195 DST = *((SIZE *)&DST); \
2196 CONT;
2197
2198 LDSX(B, s8)
2199 LDSX(H, s16)
2200 LDSX(W, s32)
2201 #undef LDSX
2202
2203 #define ATOMIC_ALU_OP(BOP, KOP) \
2204 case BOP: \
2205 if (BPF_SIZE(insn->code) == BPF_W) \
2206 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2207 (DST + insn->off)); \
2208 else if (BPF_SIZE(insn->code) == BPF_DW) \
2209 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2210 (DST + insn->off)); \
2211 else \
2212 goto default_label; \
2213 break; \
2214 case BOP | BPF_FETCH: \
2215 if (BPF_SIZE(insn->code) == BPF_W) \
2216 SRC = (u32) atomic_fetch_##KOP( \
2217 (u32) SRC, \
2218 (atomic_t *)(unsigned long) (DST + insn->off)); \
2219 else if (BPF_SIZE(insn->code) == BPF_DW) \
2220 SRC = (u64) atomic64_fetch_##KOP( \
2221 (u64) SRC, \
2222 (atomic64_t *)(unsigned long) (DST + insn->off)); \
2223 else \
2224 goto default_label; \
2225 break;
2226
2227 STX_ATOMIC_DW:
2228 STX_ATOMIC_W:
2229 STX_ATOMIC_H:
2230 STX_ATOMIC_B:
2231 switch (IMM) {
2232 /* Atomic read-modify-write instructions support only W and DW
2233 * size modifiers.
2234 */
2235 ATOMIC_ALU_OP(BPF_ADD, add)
2236 ATOMIC_ALU_OP(BPF_AND, and)
2237 ATOMIC_ALU_OP(BPF_OR, or)
2238 ATOMIC_ALU_OP(BPF_XOR, xor)
2239 #undef ATOMIC_ALU_OP
2240
2241 case BPF_XCHG:
2242 if (BPF_SIZE(insn->code) == BPF_W)
2243 SRC = (u32) atomic_xchg(
2244 (atomic_t *)(unsigned long) (DST + insn->off),
2245 (u32) SRC);
2246 else if (BPF_SIZE(insn->code) == BPF_DW)
2247 SRC = (u64) atomic64_xchg(
2248 (atomic64_t *)(unsigned long) (DST + insn->off),
2249 (u64) SRC);
2250 else
2251 goto default_label;
2252 break;
2253 case BPF_CMPXCHG:
2254 if (BPF_SIZE(insn->code) == BPF_W)
2255 BPF_R0 = (u32) atomic_cmpxchg(
2256 (atomic_t *)(unsigned long) (DST + insn->off),
2257 (u32) BPF_R0, (u32) SRC);
2258 else if (BPF_SIZE(insn->code) == BPF_DW)
2259 BPF_R0 = (u64) atomic64_cmpxchg(
2260 (atomic64_t *)(unsigned long) (DST + insn->off),
2261 (u64) BPF_R0, (u64) SRC);
2262 else
2263 goto default_label;
2264 break;
2265 /* Atomic load and store instructions support all size
2266 * modifiers.
2267 */
2268 case BPF_LOAD_ACQ:
2269 switch (BPF_SIZE(insn->code)) {
2270 #define LOAD_ACQUIRE(SIZEOP, SIZE) \
2271 case BPF_##SIZEOP: \
2272 DST = (SIZE)smp_load_acquire( \
2273 (SIZE *)(unsigned long)(SRC + insn->off)); \
2274 break;
2275 LOAD_ACQUIRE(B, u8)
2276 LOAD_ACQUIRE(H, u16)
2277 LOAD_ACQUIRE(W, u32)
2278 #ifdef CONFIG_64BIT
2279 LOAD_ACQUIRE(DW, u64)
2280 #endif
2281 #undef LOAD_ACQUIRE
2282 default:
2283 goto default_label;
2284 }
2285 break;
2286 case BPF_STORE_REL:
2287 switch (BPF_SIZE(insn->code)) {
2288 #define STORE_RELEASE(SIZEOP, SIZE) \
2289 case BPF_##SIZEOP: \
2290 smp_store_release( \
2291 (SIZE *)(unsigned long)(DST + insn->off), (SIZE)SRC); \
2292 break;
2293 STORE_RELEASE(B, u8)
2294 STORE_RELEASE(H, u16)
2295 STORE_RELEASE(W, u32)
2296 #ifdef CONFIG_64BIT
2297 STORE_RELEASE(DW, u64)
2298 #endif
2299 #undef STORE_RELEASE
2300 default:
2301 goto default_label;
2302 }
2303 break;
2304
2305 default:
2306 goto default_label;
2307 }
2308 CONT;
2309
2310 default_label:
2311 /* If we ever reach this, we have a bug somewhere. Die hard here
2312 * instead of just returning 0; we could be somewhere in a subprog,
2313 * so execution could continue otherwise which we do /not/ want.
2314 *
2315 * Note that the verifier only allows opcodes listed in bpf_opcode_in_insntable().
2316 */
2317 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2318 insn->code, insn->imm);
2319 BUG_ON(1);
2320 return 0;
2321 }
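
/* A minimal, self-contained sketch of the dispatch technique used by
 * ___bpf_prog_run() above: GNU C "labels as values" yield a
 * direct-threaded interpreter in which each handler jumps straight to
 * the next one. The toy opcodes are made up; the snippet is kept under
 * #if 0 as it is purely illustrative.
 */
#if 0
enum { OP_INC, OP_DBL, OP_END };

static int toy_run(const unsigned char *pc)
{
	static const void * const jumptable[] = {
		[OP_INC] = &&do_inc,
		[OP_DBL] = &&do_dbl,
		[OP_END] = &&do_end,
	};
	int acc = 0;

select_insn:
	goto *jumptable[*pc];
do_inc:
	acc += 1;
	pc++;
	goto select_insn;
do_dbl:
	acc *= 2;
	pc++;
	goto select_insn;
do_end:
	return acc;	/* { OP_INC, OP_INC, OP_DBL, OP_END } yields 4 */
}
#endif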
2322
2323 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2324 #define DEFINE_BPF_PROG_RUN(stack_size) \
2325 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2326 { \
2327 u64 stack[stack_size / sizeof(u64)]; \
2328 u64 regs[MAX_BPF_EXT_REG] = {}; \
2329 \
2330 kmsan_unpoison_memory(stack, sizeof(stack)); \
2331 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2332 ARG1 = (u64) (unsigned long) ctx; \
2333 return ___bpf_prog_run(regs, insn); \
2334 }
2335
2336 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2337 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2338 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2339 const struct bpf_insn *insn) \
2340 { \
2341 u64 stack[stack_size / sizeof(u64)]; \
2342 u64 regs[MAX_BPF_EXT_REG]; \
2343 \
2344 kmsan_unpoison_memory(stack, sizeof(stack)); \
2345 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2346 BPF_R1 = r1; \
2347 BPF_R2 = r2; \
2348 BPF_R3 = r3; \
2349 BPF_R4 = r4; \
2350 BPF_R5 = r5; \
2351 return ___bpf_prog_run(regs, insn); \
2352 }
2353
2354 #define EVAL1(FN, X) FN(X)
2355 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2356 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2357 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2358 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2359 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
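/* The EVAL*() helpers apply FN to each argument in turn, e.g.
 * EVAL3(FN, 32, 64, 96) expands to FN(32) FN(64) FN(96).
 */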
2360
2361 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2362 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2363 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2364
2365 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2366 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2367 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2368
2369 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2370
2371 static unsigned int (*interpreters[])(const void *ctx,
2372 const struct bpf_insn *insn) = {
2373 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2374 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2375 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2376 };
2377 #undef PROG_NAME_LIST
2378 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2379 static __maybe_unused
2380 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2381 const struct bpf_insn *insn) = {
2382 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2383 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2384 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2385 };
2386 #undef PROG_NAME_LIST
2387
2388 #ifdef CONFIG_BPF_SYSCALL
2389 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2390 {
2391 stack_depth = max_t(u32, stack_depth, 1);
2392 insn->off = (s16) insn->imm;
2393 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2394 __bpf_call_base_args;
2395 insn->code = BPF_JMP | BPF_CALL_ARGS;
2396 }
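
/* Example: a program with stack_depth == 40 rounds up to 64, so the
 * call is patched to interpreters_args[1], i.e. __bpf_prog_run_args64
 * with a 64-byte BPF stack frame.
 */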
2397 #endif
2398 #endif
2399
2400 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2401 const struct bpf_insn *insn)
2402 {
2403 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2404 * is not working properly, so warn about it!
2405 */
2406 WARN_ON_ONCE(1);
2407 return 0;
2408 }
2409
2410 static bool __bpf_prog_map_compatible(struct bpf_map *map,
2411 const struct bpf_prog *fp)
2412 {
2413 enum bpf_prog_type prog_type = resolve_prog_type(fp);
2414 struct bpf_prog_aux *aux = fp->aux;
2415 enum bpf_cgroup_storage_type i;
2416 bool ret = false;
2417 u64 cookie;
2418
2419 if (fp->kprobe_override)
2420 return ret;
2421
2422 spin_lock(&map->owner_lock);
2423 /* There's no owner yet where we could check for compatibility. */
2424 if (!map->owner) {
2425 map->owner = bpf_map_owner_alloc(map);
2426 if (!map->owner)
2427 goto err;
2428 map->owner->type = prog_type;
2429 map->owner->jited = fp->jited;
2430 map->owner->xdp_has_frags = aux->xdp_has_frags;
2431 map->owner->sleepable = fp->sleepable;
2432 map->owner->expected_attach_type = fp->expected_attach_type;
2433 map->owner->attach_func_proto = aux->attach_func_proto;
2434 for_each_cgroup_storage_type(i) {
2435 map->owner->storage_cookie[i] =
2436 aux->cgroup_storage[i] ?
2437 aux->cgroup_storage[i]->cookie : 0;
2438 }
2439 ret = true;
2440 } else {
2441 ret = map->owner->type == prog_type &&
2442 map->owner->jited == fp->jited &&
2443 map->owner->xdp_has_frags == aux->xdp_has_frags &&
2444 map->owner->sleepable == fp->sleepable;
2445 if (ret &&
2446 map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
2447 map->owner->expected_attach_type != fp->expected_attach_type)
2448 ret = false;
2449 for_each_cgroup_storage_type(i) {
2450 if (!ret)
2451 break;
2452 cookie = aux->cgroup_storage[i] ?
2453 aux->cgroup_storage[i]->cookie : 0;
2454 ret = map->owner->storage_cookie[i] == cookie ||
2455 !cookie;
2456 }
2457 if (ret &&
2458 map->owner->attach_func_proto != aux->attach_func_proto) {
2459 switch (prog_type) {
2460 case BPF_PROG_TYPE_TRACING:
2461 case BPF_PROG_TYPE_LSM:
2462 case BPF_PROG_TYPE_EXT:
2463 case BPF_PROG_TYPE_STRUCT_OPS:
2464 ret = false;
2465 break;
2466 default:
2467 break;
2468 }
2469 }
2470 }
2471 err:
2472 spin_unlock(&map->owner_lock);
2473 return ret;
2474 }
2475
2476 bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
2477 {
2478 /* XDP programs inserted into maps are not guaranteed to run on
2479 * a particular netdev (and can run outside driver context entirely
2480 * in the case of devmap and cpumap). Until device checks
2481 * are implemented, prohibit adding dev-bound programs to program maps.
2482 */
2483 if (bpf_prog_is_dev_bound(fp->aux))
2484 return false;
2485
2486 return __bpf_prog_map_compatible(map, fp);
2487 }
2488
2489 static int bpf_check_tail_call(const struct bpf_prog *fp)
2490 {
2491 struct bpf_prog_aux *aux = fp->aux;
2492 int i, ret = 0;
2493
2494 mutex_lock(&aux->used_maps_mutex);
2495 for (i = 0; i < aux->used_map_cnt; i++) {
2496 struct bpf_map *map = aux->used_maps[i];
2497
2498 if (!map_type_contains_progs(map))
2499 continue;
2500
2501 if (!__bpf_prog_map_compatible(map, fp)) {
2502 ret = -EINVAL;
2503 goto out;
2504 }
2505 }
2506
2507 out:
2508 mutex_unlock(&aux->used_maps_mutex);
2509 return ret;
2510 }
2511
2512 static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
2513 {
2514 bool select_interpreter = false;
2515 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2516 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2517 u32 idx = (round_up(stack_depth, 32) / 32) - 1;
2518
2519 /* may_goto may cause stack size > 512, leading to idx out-of-bounds.
2520 * But for non-JITed programs, we don't need bpf_func, so no bounds
2521 * check needed.
2522 */
2523 if (idx < ARRAY_SIZE(interpreters)) {
2524 fp->bpf_func = interpreters[idx];
2525 select_interpreter = true;
2526 } else {
2527 fp->bpf_func = __bpf_prog_ret0_warn;
2528 }
2529 #else
2530 fp->bpf_func = __bpf_prog_ret0_warn;
2531 #endif
2532 return select_interpreter;
2533 }
2534
2535 /**
2536 * bpf_prog_select_runtime - select exec runtime for BPF program
2537 * @fp: bpf_prog populated with BPF program
2538 * @err: pointer to error variable
2539 *
2540 * Try to JIT eBPF program, if JIT is not available, use interpreter.
2541 * The BPF program will be executed via bpf_prog_run() function.
2542 *
2543 * Return: the &fp argument along with &err set to 0 for success or
2544 * a negative errno code on failure
2545 */
2546 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2547 {
2548 /* In case of BPF to BPF calls, the verifier did all the prep
2549 * work with regard to JITing, etc.
2550 */
2551 bool jit_needed = false;
2552
2553 if (fp->bpf_func)
2554 goto finalize;
2555
2556 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2557 bpf_prog_has_kfunc_call(fp))
2558 jit_needed = true;
2559
2560 if (!bpf_prog_select_interpreter(fp))
2561 jit_needed = true;
2562
2563 /* eBPF JITs can rewrite the program in case constant
2564 * blinding is active. However, in case of error during
2565 * blinding, bpf_int_jit_compile() must always return a
2566 * valid program, which in this case would simply not
2567 * be JITed, but falls back to the interpreter.
2568 */
2569 if (!bpf_prog_is_offloaded(fp->aux)) {
2570 *err = bpf_prog_alloc_jited_linfo(fp);
2571 if (*err)
2572 return fp;
2573
2574 fp = bpf_int_jit_compile(fp);
2575 bpf_prog_jit_attempt_done(fp);
2576 if (!fp->jited && jit_needed) {
2577 *err = -ENOTSUPP;
2578 return fp;
2579 }
2580 } else {
2581 *err = bpf_prog_offload_compile(fp);
2582 if (*err)
2583 return fp;
2584 }
2585
2586 finalize:
2587 *err = bpf_prog_lock_ro(fp);
2588 if (*err)
2589 return fp;
2590
2591 /* The tail call compatibility check can only be done at
2592 * this late stage as we need to determine whether we deal
2593 * with JITed or non-JITed program concatenations, and not
2594 * all eBPF JITs might immediately support all features.
2595 */
2596 *err = bpf_check_tail_call(fp);
2597
2598 return fp;
2599 }
2600 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
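
/* A sketch of the typical call pattern on the program load path (error
 * unwinding abbreviated; the surrounding labels are illustrative):
 *
 *	int err;
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_used_maps;
 */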
2601
2602 static unsigned int __bpf_prog_ret1(const void *ctx,
2603 const struct bpf_insn *insn)
2604 {
2605 return 1;
2606 }
2607
2608 static struct bpf_prog_dummy {
2609 struct bpf_prog prog;
2610 } dummy_bpf_prog = {
2611 .prog = {
2612 .bpf_func = __bpf_prog_ret1,
2613 },
2614 };
2615
2616 struct bpf_empty_prog_array bpf_empty_prog_array = {
2617 .null_prog = NULL,
2618 };
2619 EXPORT_SYMBOL(bpf_empty_prog_array);
2620
2621 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2622 {
2623 struct bpf_prog_array *p;
2624
2625 if (prog_cnt)
2626 p = kzalloc_flex(*p, items, prog_cnt + 1, flags);
2627 else
2628 p = &bpf_empty_prog_array.hdr;
2629
2630 return p;
2631 }
2632
2633 void bpf_prog_array_free(struct bpf_prog_array *progs)
2634 {
2635 if (!progs || progs == &bpf_empty_prog_array.hdr)
2636 return;
2637 kfree_rcu(progs, rcu);
2638 }
2639
2640 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2641 {
2642 struct bpf_prog_array *progs;
2643
2644 /* If RCU Tasks Trace grace period implies RCU grace period, there is
2645 * no need to call kfree_rcu(), just call kfree() directly.
2646 */
2647 progs = container_of(rcu, struct bpf_prog_array, rcu);
2648 if (rcu_trace_implies_rcu_gp())
2649 kfree(progs);
2650 else
2651 kfree_rcu(progs, rcu);
2652 }
2653
2654 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2655 {
2656 if (!progs || progs == &bpf_empty_prog_array.hdr)
2657 return;
2658 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2659 }
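/* Sleepable prog arrays may still be walked by readers under
 * rcu_read_lock_trace(), so teardown waits for a tasks-trace grace
 * period first and, unless that period implies it, a regular RCU grace
 * period as well; the callback above chains the second wait only when
 * it is actually needed.
 */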
2660
2661 int bpf_prog_array_length(struct bpf_prog_array *array)
2662 {
2663 struct bpf_prog_array_item *item;
2664 u32 cnt = 0;
2665
2666 for (item = array->items; item->prog; item++)
2667 if (item->prog != &dummy_bpf_prog.prog)
2668 cnt++;
2669 return cnt;
2670 }
2671
2672 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2673 {
2674 struct bpf_prog_array_item *item;
2675
2676 for (item = array->items; item->prog; item++)
2677 if (item->prog != &dummy_bpf_prog.prog)
2678 return false;
2679 return true;
2680 }
2681
2682 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2683 u32 *prog_ids,
2684 u32 request_cnt)
2685 {
2686 struct bpf_prog_array_item *item;
2687 int i = 0;
2688
2689 for (item = array->items; item->prog; item++) {
2690 if (item->prog == &dummy_bpf_prog.prog)
2691 continue;
2692 prog_ids[i] = item->prog->aux->id;
2693 if (++i == request_cnt) {
2694 item++;
2695 break;
2696 }
2697 }
2698
2699 return !!(item->prog);
2700 }
2701
2702 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2703 __u32 __user *prog_ids, u32 cnt)
2704 {
2705 unsigned long err = 0;
2706 bool nospc;
2707 u32 *ids;
2708
2709 /* users of this function are doing:
2710 * cnt = bpf_prog_array_length();
2711 * if (cnt > 0)
2712 * bpf_prog_array_copy_to_user(..., cnt);
2713 * so below kcalloc doesn't need extra cnt > 0 check.
2714 */
2715 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2716 if (!ids)
2717 return -ENOMEM;
2718 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2719 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2720 kfree(ids);
2721 if (err)
2722 return -EFAULT;
2723 if (nospc)
2724 return -ENOSPC;
2725 return 0;
2726 }
2727
2728 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2729 struct bpf_prog *old_prog)
2730 {
2731 struct bpf_prog_array_item *item;
2732
2733 for (item = array->items; item->prog; item++)
2734 if (item->prog == old_prog) {
2735 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2736 break;
2737 }
2738 }
2739
2740 /**
2741 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2742 * index into the program array with
2743 * a dummy no-op program.
2744 * @array: a bpf_prog_array
2745 * @index: the index of the program to replace
2746 *
2747 * Skips over dummy programs (they are not counted) when calculating
2748 * the position of the program to replace.
2749 *
2750 * Return:
2751 * * 0 - Success
2752 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2753 * * -ENOENT - Index out of range
2754 */
2755 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2756 {
2757 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2758 }
2759
2760 /**
2761 * bpf_prog_array_update_at() - Updates the program at the given index
2762 * into the program array.
2763 * @array: a bpf_prog_array
2764 * @index: the index of the program to update
2765 * @prog: the program to insert into the array
2766 *
2767 * Skips over dummy programs (they are not counted) when calculating
2768 * the position of the program to update.
2769 *
2770 * Return:
2771 * * 0 - Success
2772 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2773 * * -ENOENT - Index out of range
2774 */
2775 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2776 struct bpf_prog *prog)
2777 {
2778 struct bpf_prog_array_item *item;
2779
2780 if (unlikely(index < 0))
2781 return -EINVAL;
2782
2783 for (item = array->items; item->prog; item++) {
2784 if (item->prog == &dummy_bpf_prog.prog)
2785 continue;
2786 if (!index) {
2787 WRITE_ONCE(item->prog, prog);
2788 return 0;
2789 }
2790 index--;
2791 }
2792 return -ENOENT;
2793 }
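
/* Example: for items {A, dummy, B, NULL}, index 0 updates A, index 1
 * updates B (the dummy slot is not counted), and index 2 fails with
 * -ENOENT.
 */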
2794
2795 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2796 struct bpf_prog *exclude_prog,
2797 struct bpf_prog *include_prog,
2798 u64 bpf_cookie,
2799 struct bpf_prog_array **new_array)
2800 {
2801 int new_prog_cnt, carry_prog_cnt = 0;
2802 struct bpf_prog_array_item *existing, *new;
2803 struct bpf_prog_array *array;
2804 bool found_exclude = false;
2805
2806 /* Figure out how many existing progs we need to carry over to
2807 * the new array.
2808 */
2809 if (old_array) {
2810 existing = old_array->items;
2811 for (; existing->prog; existing++) {
2812 if (existing->prog == exclude_prog) {
2813 found_exclude = true;
2814 continue;
2815 }
2816 if (existing->prog != &dummy_bpf_prog.prog)
2817 carry_prog_cnt++;
2818 if (existing->prog == include_prog)
2819 return -EEXIST;
2820 }
2821 }
2822
2823 if (exclude_prog && !found_exclude)
2824 return -ENOENT;
2825
2826 /* How many progs (not NULL) will be in the new array? */
2827 new_prog_cnt = carry_prog_cnt;
2828 if (include_prog)
2829 new_prog_cnt += 1;
2830
2831 /* Do we have any prog (not NULL) in the new array? */
2832 if (!new_prog_cnt) {
2833 *new_array = NULL;
2834 return 0;
2835 }
2836
2837 /* +1 as the end of prog_array is marked with NULL */
2838 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2839 if (!array)
2840 return -ENOMEM;
2841 new = array->items;
2842
2843 /* Fill in the new prog array */
2844 if (carry_prog_cnt) {
2845 existing = old_array->items;
2846 for (; existing->prog; existing++) {
2847 if (existing->prog == exclude_prog ||
2848 existing->prog == &dummy_bpf_prog.prog)
2849 continue;
2850
2851 new->prog = existing->prog;
2852 new->bpf_cookie = existing->bpf_cookie;
2853 new++;
2854 }
2855 }
2856 if (include_prog) {
2857 new->prog = include_prog;
2858 new->bpf_cookie = bpf_cookie;
2859 new++;
2860 }
2861 new->prog = NULL;
2862 *new_array = array;
2863 return 0;
2864 }
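
/* A sketch of the attach-side usage, loosely following the tracing
 * code: build a new array with @prog appended while the old array stays
 * intact for concurrent RCU readers. Names other than
 * bpf_prog_array_copy() and bpf_prog_array_free_sleepable() are
 * illustrative:
 *
 *	struct bpf_prog_array *new_array;
 *	int ret;
 *
 *	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(owner->prog_array, new_array);
 *	bpf_prog_array_free_sleepable(old_array);
 */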
2865
2866 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2867 u32 *prog_ids, u32 request_cnt,
2868 u32 *prog_cnt)
2869 {
2870 u32 cnt = 0;
2871
2872 if (array)
2873 cnt = bpf_prog_array_length(array);
2874
2875 *prog_cnt = cnt;
2876
2877 /* return early if user requested only program count or nothing to copy */
2878 if (!request_cnt || !cnt)
2879 return 0;
2880
2881 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2882 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2883 : 0;
2884 }
2885
2886 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2887 struct bpf_map **used_maps, u32 len)
2888 {
2889 struct bpf_map *map;
2890 bool sleepable;
2891 u32 i;
2892
2893 sleepable = aux->prog->sleepable;
2894 for (i = 0; i < len; i++) {
2895 map = used_maps[i];
2896 if (map->ops->map_poke_untrack)
2897 map->ops->map_poke_untrack(map, aux);
2898 if (sleepable)
2899 atomic64_dec(&map->sleepable_refcnt);
2900 bpf_map_put(map);
2901 }
2902 }
2903
2904 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2905 {
2906 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2907 kfree(aux->used_maps);
2908 }
2909
2910 void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
2911 {
2912 #ifdef CONFIG_BPF_SYSCALL
2913 struct btf_mod_pair *btf_mod;
2914 u32 i;
2915
2916 for (i = 0; i < len; i++) {
2917 btf_mod = &used_btfs[i];
2918 if (btf_mod->module)
2919 module_put(btf_mod->module);
2920 btf_put(btf_mod->btf);
2921 }
2922 #endif
2923 }
2924
2925 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2926 {
2927 __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt);
2928 kfree(aux->used_btfs);
2929 }
2930
2931 static void bpf_prog_free_deferred(struct work_struct *work)
2932 {
2933 struct bpf_prog_aux *aux;
2934 int i;
2935
2936 aux = container_of(work, struct bpf_prog_aux, work);
2937 #ifdef CONFIG_BPF_SYSCALL
2938 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2939 bpf_prog_stream_free(aux->prog);
2940 #endif
2941 #ifdef CONFIG_CGROUP_BPF
2942 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2943 bpf_cgroup_atype_put(aux->cgroup_atype);
2944 #endif
2945 bpf_free_used_maps(aux);
2946 bpf_free_used_btfs(aux);
2947 bpf_prog_disassoc_struct_ops(aux->prog);
2948 if (bpf_prog_is_dev_bound(aux))
2949 bpf_prog_dev_bound_destroy(aux->prog);
2950 #ifdef CONFIG_PERF_EVENTS
2951 if (aux->prog->has_callchain_buf)
2952 put_callchain_buffers();
2953 #endif
2954 if (aux->dst_trampoline)
2955 bpf_trampoline_put(aux->dst_trampoline);
2956 for (i = 0; i < aux->real_func_cnt; i++) {
2957 /* We can just unlink the subprog poke descriptor table as
2958 * it was originally linked to the main program and is also
2959 * released along with it.
2960 */
2961 aux->func[i]->aux->poke_tab = NULL;
2962 bpf_jit_free(aux->func[i]);
2963 }
2964 if (aux->real_func_cnt) {
2965 kfree(aux->func);
2966 bpf_prog_unlock_free(aux->prog);
2967 } else {
2968 bpf_jit_free(aux->prog);
2969 }
2970 }
2971
2972 void bpf_prog_free(struct bpf_prog *fp)
2973 {
2974 struct bpf_prog_aux *aux = fp->aux;
2975
2976 if (aux->dst_prog)
2977 bpf_prog_put(aux->dst_prog);
2978 bpf_token_put(aux->token);
2979 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2980 schedule_work(&aux->work);
2981 }
2982 EXPORT_SYMBOL_GPL(bpf_prog_free);
2983
2984 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2985 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2986
2987 void bpf_user_rnd_init_once(void)
2988 {
2989 prandom_init_once(&bpf_user_rnd_state);
2990 }
2991
2992 BPF_CALL_0(bpf_user_rnd_u32)
2993 {
2994 /* Should someone ever have the rather unwise idea to use some
2995 * of the registers passed into this function, then note that
2996 * this function is called from native eBPF and classic-to-eBPF
2997 * transformations. Register assignments from both sides are
2998 * different, f.e. classic always sets fn(ctx, A, X) here.
2999 */
3000 struct rnd_state *state;
3001 u32 res;
3002
3003 state = &get_cpu_var(bpf_user_rnd_state);
3004 res = prandom_u32_state(state);
3005 put_cpu_var(bpf_user_rnd_state);
3006
3007 return res;
3008 }
3009
3010 BPF_CALL_0(bpf_get_raw_cpu_id)
3011 {
3012 return raw_smp_processor_id();
3013 }
3014
3015 /* Weak definitions of helper functions in case we don't have bpf syscall. */
3016 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
3017 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
3018 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
3019 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
3020 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
3021 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
3022 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
3023 const struct bpf_func_proto bpf_spin_lock_proto __weak;
3024 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
3025 const struct bpf_func_proto bpf_jiffies64_proto __weak;
3026
3027 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
3028 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
3029 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
3030 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
3031 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
3032 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
3033 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
3034
3035 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
3036 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
3037 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
3038 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
3039 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
3040 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
3041 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
3042 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
3043 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
3044 const struct bpf_func_proto bpf_set_retval_proto __weak;
3045 const struct bpf_func_proto bpf_get_retval_proto __weak;
3046
3047 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
3048 {
3049 return NULL;
3050 }
3051
3052 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
3053 {
3054 return NULL;
3055 }
3056
3057 const struct bpf_func_proto * __weak bpf_get_perf_event_read_value_proto(void)
3058 {
3059 return NULL;
3060 }
3061
3062 u64 __weak
3063 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
3064 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
3065 {
3066 return -ENOTSUPP;
3067 }
3068 EXPORT_SYMBOL_GPL(bpf_event_output);
3069
3070 /* Always built-in helper functions. */
3071 const struct bpf_func_proto bpf_tail_call_proto = {
3072 /* func is unused for tail_call; we set it to pass the
3073 * get_helper_proto check
3074 */
3075 .func = BPF_PTR_POISON,
3076 .gpl_only = false,
3077 .ret_type = RET_VOID,
3078 .arg1_type = ARG_PTR_TO_CTX,
3079 .arg2_type = ARG_CONST_MAP_PTR,
3080 .arg3_type = ARG_ANYTHING,
3081 };
3082
3083 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
3084 * It is encouraged to implement bpf_int_jit_compile() instead, so that
3085 * eBPF and implicitly also cBPF can get JITed!
3086 */
3087 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
3088 {
3089 return prog;
3090 }
3091
3092 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
3093 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
3094 */
3095 void __weak bpf_jit_compile(struct bpf_prog *prog)
3096 {
3097 }
3098
3099 bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
3100 {
3101 return false;
3102 }
3103
3104 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
3105 * analysis code and wants explicit zero extension inserted by verifier.
3106 * Otherwise, return FALSE.
3107 *
3108 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
3109 * you don't override this. JITs that don't want these extra insns can detect
3110 * them using insn_is_zext.
3111 */
3112 bool __weak bpf_jit_needs_zext(void)
3113 {
3114 return false;
3115 }
3116
3117 /* By default, enable the verifier's mitigations against Spectre v1 and v4 for
3118 * all archs. The value returned must not change at runtime as there is
3119 * currently no support for reloading programs that were loaded without
3120 * mitigations.
3121 */
3122 bool __weak bpf_jit_bypass_spec_v1(void)
3123 {
3124 return false;
3125 }
3126
3127 bool __weak bpf_jit_bypass_spec_v4(void)
3128 {
3129 return false;
3130 }
3131
3132 /* Return true if the JIT inlines the call to the helper corresponding to
3133 * the imm.
3134 *
3135 * The verifier will not patch the insn->imm for the call to the helper if
3136 * this returns true.
3137 */
3138 bool __weak bpf_jit_inlines_helper_call(s32 imm)
3139 {
3140 return false;
3141 }
3142
3143 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
3144 bool __weak bpf_jit_supports_subprog_tailcalls(void)
3145 {
3146 return false;
3147 }
3148
3149 bool __weak bpf_jit_supports_percpu_insn(void)
3150 {
3151 return false;
3152 }
3153
3154 bool __weak bpf_jit_supports_kfunc_call(void)
3155 {
3156 return false;
3157 }
3158
3159 bool __weak bpf_jit_supports_far_kfunc_call(void)
3160 {
3161 return false;
3162 }
3163
3164 bool __weak bpf_jit_supports_arena(void)
3165 {
3166 return false;
3167 }
3168
3169 bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3170 {
3171 return false;
3172 }
3173
3174 bool __weak bpf_jit_supports_fsession(void)
3175 {
3176 return false;
3177 }
3178
3179 u64 __weak bpf_arch_uaddress_limit(void)
3180 {
3181 #if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
3182 return TASK_SIZE;
3183 #else
3184 return 0;
3185 #endif
3186 }
3187
3188 /* Return TRUE if the JIT backend satisfies the following two conditions:
3189 * 1) JIT backend supports atomic_xchg() on pointer-sized words.
3190 * 2) Under the specific arch, the implementation of xchg() is the same
3191 * as atomic_xchg() on pointer-sized words.
3192 */
3193 bool __weak bpf_jit_supports_ptr_xchg(void)
3194 {
3195 return false;
3196 }
3197
3198 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
3199 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
3200 */
3201 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
3202 int len)
3203 {
3204 return -EFAULT;
3205 }
3206
3207 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
3208 enum bpf_text_poke_type new_t, void *old_addr,
3209 void *new_addr)
3210 {
3211 return -ENOTSUPP;
3212 }
3213
3214 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
3215 {
3216 return ERR_PTR(-ENOTSUPP);
3217 }
3218
3219 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
3220 {
3221 return -ENOTSUPP;
3222 }
3223
3224 bool __weak bpf_jit_supports_exceptions(void)
3225 {
3226 return false;
3227 }
3228
3229 bool __weak bpf_jit_supports_private_stack(void)
3230 {
3231 return false;
3232 }
3233
3234 void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3235 {
3236 }
3237
3238 bool __weak bpf_jit_supports_timed_may_goto(void)
3239 {
3240 return false;
3241 }
3242
3243 u64 __weak arch_bpf_timed_may_goto(void)
3244 {
3245 return 0;
3246 }
3247
3248 static noinline void bpf_prog_report_may_goto_violation(void)
3249 {
3250 #ifdef CONFIG_BPF_SYSCALL
3251 struct bpf_stream_stage ss;
3252 struct bpf_prog *prog;
3253
3254 prog = bpf_prog_find_from_stack();
3255 if (!prog)
3256 return;
3257 bpf_stream_stage(ss, prog, BPF_STDERR, ({
3258 bpf_stream_printk(ss, "ERROR: Timeout detected for may_goto instruction\n");
3259 bpf_stream_dump_stack(ss);
3260 }));
3261 #endif
3262 }
3263
3264 u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
3265 {
3266 u64 time = ktime_get_mono_fast_ns();
3267
3268 /* Populate the timestamp for this stack frame, and refresh count. */
3269 if (!p->timestamp) {
3270 p->timestamp = time;
3271 return BPF_MAX_TIMED_LOOPS;
3272 }
3273 /* Check if we've exhausted our time slice, and zero count. */
3274 if (unlikely(time - p->timestamp >= (NSEC_PER_SEC / 4))) {
3275 bpf_prog_report_may_goto_violation();
3276 return 0;
3277 }
3278 /* Refresh the count for the stack frame. */
3279 return BPF_MAX_TIMED_LOOPS;
3280 }
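
/* Each timed may_goto loop thus gets a fresh budget of
 * BPF_MAX_TIMED_LOOPS iterations per check, and a given stack frame a
 * NSEC_PER_SEC / 4 time slice before its loops are forced to exit.
 */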
3281
3282 /* for configs without MMU or 32-bit */
3283 __weak const struct bpf_map_ops arena_map_ops;
3284 __weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
3285 {
3286 return 0;
3287 }
3288 __weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
3289 {
3290 return 0;
3291 }
3292
3293 #ifdef CONFIG_BPF_SYSCALL
3294 static int __init bpf_global_ma_init(void)
3295 {
3296 int ret;
3297
3298 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
3299 bpf_global_ma_set = !ret;
3300 return ret;
3301 }
3302 late_initcall(bpf_global_ma_init);
3303 #endif
3304
3305 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
3306 EXPORT_SYMBOL(bpf_stats_enabled_key);
3307
3308 /* All definitions of tracepoints related to BPF. */
3309 #define CREATE_TRACE_POINTS
3310 #include <linux/bpf_trace.h>
3311
3312 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
3313 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
3314
3315 #ifdef CONFIG_BPF_SYSCALL
3316
3317 int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
3318 const char **linep, int *nump)
3319 {
3320 int idx = -1, insn_start, insn_end, len;
3321 struct bpf_line_info *linfo;
3322 void **jited_linfo;
3323 struct btf *btf;
3324 int nr_linfo;
3325
3326 btf = prog->aux->btf;
3327 linfo = prog->aux->linfo;
3328 jited_linfo = prog->aux->jited_linfo;
3329
3330 if (!btf || !linfo || !jited_linfo)
3331 return -EINVAL;
3332 len = prog->aux->func ? prog->aux->func[prog->aux->func_idx]->len : prog->len;
3333
3334 linfo = &prog->aux->linfo[prog->aux->linfo_idx];
3335 jited_linfo = &prog->aux->jited_linfo[prog->aux->linfo_idx];
3336
3337 insn_start = linfo[0].insn_off;
3338 insn_end = insn_start + len;
3339 nr_linfo = prog->aux->nr_linfo - prog->aux->linfo_idx;
3340
3341 for (int i = 0; i < nr_linfo &&
3342 linfo[i].insn_off >= insn_start && linfo[i].insn_off < insn_end; i++) {
3343 if (jited_linfo[i] >= (void *)ip)
3344 break;
3345 idx = i;
3346 }
3347
3348 if (idx == -1)
3349 return -ENOENT;
3350
3351 /* Get base component of the file path. */
3352 *filep = btf_name_by_offset(btf, linfo[idx].file_name_off);
3353 *filep = kbasename(*filep);
3354 /* Obtain the source line, and strip whitespace in prefix. */
3355 *linep = btf_name_by_offset(btf, linfo[idx].line_off);
3356 while (isspace(**linep))
3357 *linep += 1;
3358 *nump = BPF_LINE_INFO_LINE_NUM(linfo[idx].line_col);
3359 return 0;
3360 }
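
/* A sketch of resolving a JITed instruction address back to source
 * info; everything except bpf_prog_get_file_line() itself is
 * illustrative:
 *
 *	const char *file, *line;
 *	int num;
 *
 *	if (!bpf_prog_get_file_line(prog, ip, &file, &line, &num))
 *		pr_info("%s:%d: %s\n", file, num, line);
 */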
3361
3362 struct walk_stack_ctx {
3363 struct bpf_prog *prog;
3364 };
3365
3366 static bool find_from_stack_cb(void *cookie, u64 ip, u64 sp, u64 bp)
3367 {
3368 struct walk_stack_ctx *ctxp = cookie;
3369 struct bpf_prog *prog;
3370
3371 /*
3372 * The RCU read lock is held to safely traverse the latch tree, but we
3373 * don't need its protection when accessing the prog, since it has an
3374 * active stack frame on the current stack trace, and won't disappear.
3375 */
3376 rcu_read_lock();
3377 prog = bpf_prog_ksym_find(ip);
3378 rcu_read_unlock();
3379 if (!prog)
3380 return true;
3381 /* Make sure we return the main prog if we found a subprog */
3382 ctxp->prog = prog->aux->main_prog_aux->prog;
3383 return false;
3384 }
3385
3386 struct bpf_prog *bpf_prog_find_from_stack(void)
3387 {
3388 struct walk_stack_ctx ctx = {};
3389
3390 arch_bpf_stack_walk(find_from_stack_cb, &ctx);
3391 return ctx.prog;
3392 }
3393
3394 #endif
3395