1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux Socket Filter - Kernel level socket filtering 4 * 5 * Based on the design of the Berkeley Packet Filter. The new 6 * internal format has been designed by PLUMgrid: 7 * 8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com 9 * 10 * Authors: 11 * 12 * Jay Schulist <jschlst@samba.org> 13 * Alexei Starovoitov <ast@plumgrid.com> 14 * Daniel Borkmann <dborkman@redhat.com> 15 * 16 * Andi Kleen - Fix a few bad bugs and races. 17 * Kris Katterjohn - Added many additional checks in bpf_check_classic() 18 */ 19 20 #include <uapi/linux/btf.h> 21 #include <crypto/sha1.h> 22 #include <linux/filter.h> 23 #include <linux/skbuff.h> 24 #include <linux/vmalloc.h> 25 #include <linux/prandom.h> 26 #include <linux/bpf.h> 27 #include <linux/btf.h> 28 #include <linux/hex.h> 29 #include <linux/objtool.h> 30 #include <linux/overflow.h> 31 #include <linux/rbtree_latch.h> 32 #include <linux/kallsyms.h> 33 #include <linux/rcupdate.h> 34 #include <linux/perf_event.h> 35 #include <linux/extable.h> 36 #include <linux/log2.h> 37 #include <linux/bpf_verifier.h> 38 #include <linux/nodemask.h> 39 #include <linux/nospec.h> 40 #include <linux/bpf_mem_alloc.h> 41 #include <linux/memcontrol.h> 42 #include <linux/execmem.h> 43 #include <crypto/sha2.h> 44 45 #include <asm/barrier.h> 46 #include <linux/unaligned.h> 47 48 /* Registers */ 49 #define BPF_R0 regs[BPF_REG_0] 50 #define BPF_R1 regs[BPF_REG_1] 51 #define BPF_R2 regs[BPF_REG_2] 52 #define BPF_R3 regs[BPF_REG_3] 53 #define BPF_R4 regs[BPF_REG_4] 54 #define BPF_R5 regs[BPF_REG_5] 55 #define BPF_R6 regs[BPF_REG_6] 56 #define BPF_R7 regs[BPF_REG_7] 57 #define BPF_R8 regs[BPF_REG_8] 58 #define BPF_R9 regs[BPF_REG_9] 59 #define BPF_R10 regs[BPF_REG_10] 60 61 /* Named registers */ 62 #define DST regs[insn->dst_reg] 63 #define SRC regs[insn->src_reg] 64 #define FP regs[BPF_REG_FP] 65 #define AX regs[BPF_REG_AX] 66 #define ARG1 regs[BPF_REG_ARG1] 67 #define CTX regs[BPF_REG_CTX] 68 #define OFF insn->off 69 #define IMM insn->imm 70 71 struct bpf_mem_alloc bpf_global_ma; 72 bool bpf_global_ma_set; 73 74 /* No hurry in this branch 75 * 76 * Exported for the bpf jit load helper. 
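 *
 * Illustrative use (an assumption, not part of this file): a classic
 * BPF filter can address packet data relative to the headers with
 * negative offsets, e.g.
 *
 *   BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9)
 *
 * asks for the byte at offset 9 past the network header (the IPv4
 * protocol field). This helper resolves such offsets to a pointer,
 * or returns NULL when the access would fall outside the linear
 * skb data.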
77 */ 78 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size) 79 { 80 u8 *ptr = NULL; 81 82 if (k >= SKF_NET_OFF) { 83 ptr = skb_network_header(skb) + k - SKF_NET_OFF; 84 } else if (k >= SKF_LL_OFF) { 85 if (unlikely(!skb_mac_header_was_set(skb))) 86 return NULL; 87 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 88 } 89 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) 90 return ptr; 91 92 return NULL; 93 } 94 95 /* tell bpf programs that include vmlinux.h kernel's PAGE_SIZE */ 96 enum page_size_enum { 97 __PAGE_SIZE = PAGE_SIZE 98 }; 99 100 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags) 101 { 102 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); 103 struct bpf_prog_aux *aux; 104 struct bpf_prog *fp; 105 106 size = round_up(size, __PAGE_SIZE); 107 fp = __vmalloc(size, gfp_flags); 108 if (fp == NULL) 109 return NULL; 110 111 aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); 112 if (aux == NULL) { 113 vfree(fp); 114 return NULL; 115 } 116 fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); 117 if (!fp->active) { 118 vfree(fp); 119 kfree(aux); 120 return NULL; 121 } 122 123 fp->pages = size / PAGE_SIZE; 124 fp->aux = aux; 125 fp->aux->main_prog_aux = aux; 126 fp->aux->prog = fp; 127 fp->jit_requested = ebpf_jit_enabled(); 128 fp->blinding_requested = bpf_jit_blinding_enabled(fp); 129 #ifdef CONFIG_CGROUP_BPF 130 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID; 131 #endif 132 133 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); 134 #ifdef CONFIG_FINEIBT 135 INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode); 136 #endif 137 mutex_init(&fp->aux->used_maps_mutex); 138 mutex_init(&fp->aux->ext_mutex); 139 mutex_init(&fp->aux->dst_mutex); 140 141 #ifdef CONFIG_BPF_SYSCALL 142 bpf_prog_stream_init(fp); 143 #endif 144 145 return fp; 146 } 147 148 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) 149 { 150 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); 151 struct bpf_prog *prog; 152 int cpu; 153 154 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags); 155 if (!prog) 156 return NULL; 157 158 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags); 159 if (!prog->stats) { 160 free_percpu(prog->active); 161 kfree(prog->aux); 162 vfree(prog); 163 return NULL; 164 } 165 166 for_each_possible_cpu(cpu) { 167 struct bpf_prog_stats *pstats; 168 169 pstats = per_cpu_ptr(prog->stats, cpu); 170 u64_stats_init(&pstats->syncp); 171 } 172 return prog; 173 } 174 EXPORT_SYMBOL_GPL(bpf_prog_alloc); 175 176 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog) 177 { 178 if (!prog->aux->nr_linfo || !prog->jit_requested) 179 return 0; 180 181 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo, 182 sizeof(*prog->aux->jited_linfo), 183 bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN)); 184 if (!prog->aux->jited_linfo) 185 return -ENOMEM; 186 187 return 0; 188 } 189 190 void bpf_prog_jit_attempt_done(struct bpf_prog *prog) 191 { 192 if (prog->aux->jited_linfo && 193 (!prog->jited || !prog->aux->jited_linfo[0])) { 194 kvfree(prog->aux->jited_linfo); 195 prog->aux->jited_linfo = NULL; 196 } 197 198 kfree(prog->aux->kfunc_tab); 199 prog->aux->kfunc_tab = NULL; 200 } 201 202 /* The jit engine is responsible to provide an array 203 * for insn_off to the jited_off mapping (insn_to_jit_off). 204 * 205 * The idx to this array is the insn_off. 
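 * (Illustratively: insn_to_jit_off[5] is the byte offset, within this
 * prog's JITed image, of the end of xlated insn 5.)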
Hence, the insn_off 206 * here is relative to the prog itself instead of the main prog. 207 * This array has one entry for each xlated bpf insn. 208 * 209 * jited_off is the byte off to the end of the jited insn. 210 * 211 * Hence, with 212 * insn_start: 213 * The first bpf insn off of the prog. The insn off 214 * here is relative to the main prog. 215 * e.g. if prog is a subprog, insn_start > 0 216 * linfo_idx: 217 * The prog's idx to prog->aux->linfo and jited_linfo 218 * 219 * jited_linfo[linfo_idx] = prog->bpf_func 220 * 221 * For i > linfo_idx, 222 * 223 * jited_linfo[i] = prog->bpf_func + 224 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1] 225 */ 226 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, 227 const u32 *insn_to_jit_off) 228 { 229 u32 linfo_idx, insn_start, insn_end, nr_linfo, i; 230 const struct bpf_line_info *linfo; 231 void **jited_linfo; 232 233 if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt) 234 /* Userspace did not provide linfo */ 235 return; 236 237 linfo_idx = prog->aux->linfo_idx; 238 linfo = &prog->aux->linfo[linfo_idx]; 239 insn_start = linfo[0].insn_off; 240 insn_end = insn_start + prog->len; 241 242 jited_linfo = &prog->aux->jited_linfo[linfo_idx]; 243 jited_linfo[0] = prog->bpf_func; 244 245 nr_linfo = prog->aux->nr_linfo - linfo_idx; 246 247 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++) 248 /* The verifier ensures that linfo[i].insn_off is 249 * strictly increasing 250 */ 251 jited_linfo[i] = prog->bpf_func + 252 insn_to_jit_off[linfo[i].insn_off - insn_start - 1]; 253 } 254 255 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, 256 gfp_t gfp_extra_flags) 257 { 258 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); 259 struct bpf_prog *fp; 260 u32 pages; 261 262 size = round_up(size, PAGE_SIZE); 263 pages = size / PAGE_SIZE; 264 if (pages <= fp_old->pages) 265 return fp_old; 266 267 fp = __vmalloc(size, gfp_flags); 268 if (fp) { 269 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); 270 fp->pages = pages; 271 fp->aux->prog = fp; 272 273 /* We keep fp->aux from fp_old around in the new 274 * reallocated structure. 275 */ 276 fp_old->aux = NULL; 277 fp_old->stats = NULL; 278 fp_old->active = NULL; 279 __bpf_prog_free(fp_old); 280 } 281 282 return fp; 283 } 284 285 void __bpf_prog_free(struct bpf_prog *fp) 286 { 287 if (fp->aux) { 288 mutex_destroy(&fp->aux->used_maps_mutex); 289 mutex_destroy(&fp->aux->dst_mutex); 290 kfree(fp->aux->poke_tab); 291 kfree(fp->aux); 292 } 293 free_percpu(fp->stats); 294 free_percpu(fp->active); 295 vfree(fp); 296 } 297 298 int bpf_prog_calc_tag(struct bpf_prog *fp) 299 { 300 size_t size = bpf_prog_insn_size(fp); 301 struct bpf_insn *dst; 302 bool was_ld_map; 303 u32 i; 304 305 dst = vmalloc(size); 306 if (!dst) 307 return -ENOMEM; 308 309 /* We need to take out the map fd for the digest calculation 310 * since they are unstable from user space side. 
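 *
 * Illustrative example (fd numbers are made up): loading the same
 * program twice may yield different map fds, e.g.
 *
 *   BPF_LD_MAP_FD(BPF_REG_1, 5)   vs.   BPF_LD_MAP_FD(BPF_REG_1, 7)
 *
 * Both expand to a ld_imm64 pair whose imm fields carry the fd; the
 * loop below clears those imm fields so that both variants hash to
 * the same tag.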
311 */ 312 for (i = 0, was_ld_map = false; i < fp->len; i++) { 313 dst[i] = fp->insnsi[i]; 314 if (!was_ld_map && 315 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && 316 (dst[i].src_reg == BPF_PSEUDO_MAP_FD || 317 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) { 318 was_ld_map = true; 319 dst[i].imm = 0; 320 } else if (was_ld_map && 321 dst[i].code == 0 && 322 dst[i].dst_reg == 0 && 323 dst[i].src_reg == 0 && 324 dst[i].off == 0) { 325 was_ld_map = false; 326 dst[i].imm = 0; 327 } else { 328 was_ld_map = false; 329 } 330 } 331 sha256((u8 *)dst, size, fp->digest); 332 vfree(dst); 333 return 0; 334 } 335 336 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, 337 s32 end_new, s32 curr, const bool probe_pass) 338 { 339 const s64 imm_min = S32_MIN, imm_max = S32_MAX; 340 s32 delta = end_new - end_old; 341 s64 imm = insn->imm; 342 343 if (curr < pos && curr + imm + 1 >= end_old) 344 imm += delta; 345 else if (curr >= end_new && curr + imm + 1 < end_new) 346 imm -= delta; 347 if (imm < imm_min || imm > imm_max) 348 return -ERANGE; 349 if (!probe_pass) 350 insn->imm = imm; 351 return 0; 352 } 353 354 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, 355 s32 end_new, s32 curr, const bool probe_pass) 356 { 357 s64 off_min, off_max, off; 358 s32 delta = end_new - end_old; 359 360 if (insn->code == (BPF_JMP32 | BPF_JA)) { 361 off = insn->imm; 362 off_min = S32_MIN; 363 off_max = S32_MAX; 364 } else { 365 off = insn->off; 366 off_min = S16_MIN; 367 off_max = S16_MAX; 368 } 369 370 if (curr < pos && curr + off + 1 >= end_old) 371 off += delta; 372 else if (curr >= end_new && curr + off + 1 < end_new) 373 off -= delta; 374 if (off < off_min || off > off_max) 375 return -ERANGE; 376 if (!probe_pass) { 377 if (insn->code == (BPF_JMP32 | BPF_JA)) 378 insn->imm = off; 379 else 380 insn->off = off; 381 } 382 return 0; 383 } 384 385 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, 386 s32 end_new, const bool probe_pass) 387 { 388 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0); 389 struct bpf_insn *insn = prog->insnsi; 390 int ret = 0; 391 392 for (i = 0; i < insn_cnt; i++, insn++) { 393 u8 code; 394 395 /* In the probing pass we still operate on the original, 396 * unpatched image in order to check overflows before we 397 * do any other adjustments. Therefore skip the patchlet. 398 */ 399 if (probe_pass && i == pos) { 400 i = end_new; 401 insn = prog->insnsi + end_old; 402 } 403 if (bpf_pseudo_func(insn)) { 404 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 405 end_new, i, probe_pass); 406 if (ret) 407 return ret; 408 continue; 409 } 410 code = insn->code; 411 if ((BPF_CLASS(code) != BPF_JMP && 412 BPF_CLASS(code) != BPF_JMP32) || 413 BPF_OP(code) == BPF_EXIT) 414 continue; 415 /* Adjust offset of jmps if we cross patch boundaries. 
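 * A worked example (illustrative): replacing one insn at pos 10 with
 * a 3-insn patchlet gives end_old = 11, end_new = 13, delta = 2. A
 * jump at insn 5 with off = 6 used to land on insn 12; as
 * 5 + 6 + 1 >= end_old, its offset is bumped to 8 so that it still
 * lands on the same, now shifted, instruction.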
*/ 416 if (BPF_OP(code) == BPF_CALL) { 417 if (insn->src_reg != BPF_PSEUDO_CALL) 418 continue; 419 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 420 end_new, i, probe_pass); 421 } else { 422 ret = bpf_adj_delta_to_off(insn, pos, end_old, 423 end_new, i, probe_pass); 424 } 425 if (ret) 426 break; 427 } 428 429 return ret; 430 } 431 432 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta) 433 { 434 struct bpf_line_info *linfo; 435 u32 i, nr_linfo; 436 437 nr_linfo = prog->aux->nr_linfo; 438 if (!nr_linfo || !delta) 439 return; 440 441 linfo = prog->aux->linfo; 442 443 for (i = 0; i < nr_linfo; i++) 444 if (off < linfo[i].insn_off) 445 break; 446 447 /* Push all off < linfo[i].insn_off by delta */ 448 for (; i < nr_linfo; i++) 449 linfo[i].insn_off += delta; 450 } 451 452 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 453 const struct bpf_insn *patch, u32 len) 454 { 455 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; 456 const u32 cnt_max = S16_MAX; 457 struct bpf_prog *prog_adj; 458 int err; 459 460 /* Since our patchlet doesn't expand the image, we're done. */ 461 if (insn_delta == 0) { 462 memcpy(prog->insnsi + off, patch, sizeof(*patch)); 463 return prog; 464 } 465 466 insn_adj_cnt = prog->len + insn_delta; 467 468 /* Reject anything that would potentially let the insn->off 469 * target overflow when we have excessive program expansions. 470 * We need to probe here before we do any reallocation where 471 * we afterwards may not fail anymore. 472 */ 473 if (insn_adj_cnt > cnt_max && 474 (err = bpf_adj_branches(prog, off, off + 1, off + len, true))) 475 return ERR_PTR(err); 476 477 /* Several new instructions need to be inserted. Make room 478 * for them. Likely, there's no need for a new allocation as 479 * last page could have large enough tailroom. 480 */ 481 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), 482 GFP_USER); 483 if (!prog_adj) 484 return ERR_PTR(-ENOMEM); 485 486 prog_adj->len = insn_adj_cnt; 487 488 /* Patching happens in 3 steps: 489 * 490 * 1) Move over tail of insnsi from next instruction onwards, 491 * so we can patch the single target insn with one or more 492 * new ones (patching is always from 1 to n insns, n > 0). 493 * 2) Inject new instructions at the target location. 494 * 3) Adjust branch offsets if necessary. 495 */ 496 insn_rest = insn_adj_cnt - off - len; 497 498 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, 499 sizeof(*patch) * insn_rest); 500 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); 501 502 /* We are guaranteed to not fail at this point, otherwise 503 * the ship has sailed to reverse to the original state. An 504 * overflow cannot happen at this point. 
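 * For example (illustrative): patching insn 4 with a 3-insn patchlet
 * turns [i0 .. i4 i5 ..] into [i0 .. i3 p0 p1 p2 i5 ..]; branches
 * crossing insn 4 grow by delta = 2, which is safe because either
 * the grown program still fits within cnt_max or the probe pass
 * above has already validated the new offsets.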
505 */ 506 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false)); 507 508 bpf_adj_linfo(prog_adj, off, insn_delta); 509 510 return prog_adj; 511 } 512 513 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) 514 { 515 int err; 516 517 /* Branch offsets can't overflow when program is shrinking, no need 518 * to call bpf_adj_branches(..., true) here 519 */ 520 memmove(prog->insnsi + off, prog->insnsi + off + cnt, 521 sizeof(struct bpf_insn) * (prog->len - off - cnt)); 522 prog->len -= cnt; 523 524 err = bpf_adj_branches(prog, off, off + cnt, off, false); 525 WARN_ON_ONCE(err); 526 return err; 527 } 528 529 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) 530 { 531 int i; 532 533 for (i = 0; i < fp->aux->real_func_cnt; i++) 534 bpf_prog_kallsyms_del(fp->aux->func[i]); 535 } 536 537 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) 538 { 539 bpf_prog_kallsyms_del_subprogs(fp); 540 bpf_prog_kallsyms_del(fp); 541 } 542 543 #ifdef CONFIG_BPF_JIT 544 /* All BPF JIT sysctl knobs here. */ 545 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 546 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 547 int bpf_jit_harden __read_mostly; 548 long bpf_jit_limit __read_mostly; 549 long bpf_jit_limit_max __read_mostly; 550 551 static void 552 bpf_prog_ksym_set_addr(struct bpf_prog *prog) 553 { 554 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); 555 556 prog->aux->ksym.start = (unsigned long) prog->bpf_func; 557 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len; 558 } 559 560 static void 561 bpf_prog_ksym_set_name(struct bpf_prog *prog) 562 { 563 char *sym = prog->aux->ksym.name; 564 const char *end = sym + KSYM_NAME_LEN; 565 const struct btf_type *type; 566 const char *func_name; 567 568 BUILD_BUG_ON(sizeof("bpf_prog_") + 569 sizeof(prog->tag) * 2 + 570 /* name has been null terminated. 571 * We should need +1 for the '_' preceding 572 * the name. However, the null character 573 * is double counted between the name and the 574 * sizeof("bpf_prog_") above, so we omit 575 * the +1 here. 576 */ 577 sizeof(prog->aux->name) > KSYM_NAME_LEN); 578 579 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); 580 sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); 581 582 /* prog->aux->name will be ignored if full btf name is available */ 583 if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) { 584 type = btf_type_by_id(prog->aux->btf, 585 prog->aux->func_info[prog->aux->func_idx].type_id); 586 func_name = btf_name_by_offset(prog->aux->btf, type->name_off); 587 snprintf(sym, (size_t)(end - sym), "_%s", func_name); 588 return; 589 } 590 591 if (prog->aux->name[0]) 592 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); 593 else 594 *sym = 0; 595 } 596 597 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n) 598 { 599 return container_of(n, struct bpf_ksym, tnode)->start; 600 } 601 602 static __always_inline bool bpf_tree_less(struct latch_tree_node *a, 603 struct latch_tree_node *b) 604 { 605 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b); 606 } 607 608 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) 609 { 610 unsigned long val = (unsigned long)key; 611 const struct bpf_ksym *ksym; 612 613 ksym = container_of(n, struct bpf_ksym, tnode); 614 615 if (val < ksym->start) 616 return -1; 617 /* Ensure that we detect return addresses as part of the program, when 618 * the final instruction is a call for a program part of the stack 619 * trace. 
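 * (E.g. a prog spanning [start, end) whose last JITed insn is a call
 * pushes a return address equal to end, i.e. just past the image.)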
Therefore, do val > ksym->end instead of val >= ksym->end. 620 */ 621 if (val > ksym->end) 622 return 1; 623 624 return 0; 625 } 626 627 static const struct latch_tree_ops bpf_tree_ops = { 628 .less = bpf_tree_less, 629 .comp = bpf_tree_comp, 630 }; 631 632 static DEFINE_SPINLOCK(bpf_lock); 633 static LIST_HEAD(bpf_kallsyms); 634 static struct latch_tree_root bpf_tree __cacheline_aligned; 635 636 void bpf_ksym_add(struct bpf_ksym *ksym) 637 { 638 spin_lock_bh(&bpf_lock); 639 WARN_ON_ONCE(!list_empty(&ksym->lnode)); 640 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms); 641 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 642 spin_unlock_bh(&bpf_lock); 643 } 644 645 static void __bpf_ksym_del(struct bpf_ksym *ksym) 646 { 647 if (list_empty(&ksym->lnode)) 648 return; 649 650 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 651 list_del_rcu(&ksym->lnode); 652 } 653 654 void bpf_ksym_del(struct bpf_ksym *ksym) 655 { 656 spin_lock_bh(&bpf_lock); 657 __bpf_ksym_del(ksym); 658 spin_unlock_bh(&bpf_lock); 659 } 660 661 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) 662 { 663 return fp->jited && !bpf_prog_was_classic(fp); 664 } 665 666 void bpf_prog_kallsyms_add(struct bpf_prog *fp) 667 { 668 if (!bpf_prog_kallsyms_candidate(fp) || 669 !bpf_token_capable(fp->aux->token, CAP_BPF)) 670 return; 671 672 bpf_prog_ksym_set_addr(fp); 673 bpf_prog_ksym_set_name(fp); 674 fp->aux->ksym.prog = true; 675 676 bpf_ksym_add(&fp->aux->ksym); 677 678 #ifdef CONFIG_FINEIBT 679 /* 680 * When FineIBT, code in the __cfi_foo() symbols can get executed 681 * and hence unwinder needs help. 682 */ 683 if (cfi_mode != CFI_FINEIBT) 684 return; 685 686 snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN, 687 "__cfi_%s", fp->aux->ksym.name); 688 689 fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16; 690 fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func; 691 692 bpf_ksym_add(&fp->aux->ksym_prefix); 693 #endif 694 } 695 696 void bpf_prog_kallsyms_del(struct bpf_prog *fp) 697 { 698 if (!bpf_prog_kallsyms_candidate(fp)) 699 return; 700 701 bpf_ksym_del(&fp->aux->ksym); 702 #ifdef CONFIG_FINEIBT 703 if (cfi_mode != CFI_FINEIBT) 704 return; 705 bpf_ksym_del(&fp->aux->ksym_prefix); 706 #endif 707 } 708 709 static struct bpf_ksym *bpf_ksym_find(unsigned long addr) 710 { 711 struct latch_tree_node *n; 712 713 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); 714 return n ? container_of(n, struct bpf_ksym, tnode) : NULL; 715 } 716 717 int __bpf_address_lookup(unsigned long addr, unsigned long *size, 718 unsigned long *off, char *sym) 719 { 720 struct bpf_ksym *ksym; 721 int ret = 0; 722 723 rcu_read_lock(); 724 ksym = bpf_ksym_find(addr); 725 if (ksym) { 726 unsigned long symbol_start = ksym->start; 727 unsigned long symbol_end = ksym->end; 728 729 ret = strscpy(sym, ksym->name, KSYM_NAME_LEN); 730 731 if (size) 732 *size = symbol_end - symbol_start; 733 if (off) 734 *off = addr - symbol_start; 735 } 736 rcu_read_unlock(); 737 738 return ret; 739 } 740 741 bool is_bpf_text_address(unsigned long addr) 742 { 743 bool ret; 744 745 rcu_read_lock(); 746 ret = bpf_ksym_find(addr) != NULL; 747 rcu_read_unlock(); 748 749 return ret; 750 } 751 752 struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) 753 { 754 struct bpf_ksym *ksym; 755 756 WARN_ON_ONCE(!rcu_read_lock_held()); 757 ksym = bpf_ksym_find(addr); 758 759 return ksym && ksym->prog ? 
760 container_of(ksym, struct bpf_prog_aux, ksym)->prog : 761 NULL; 762 } 763 764 bool bpf_has_frame_pointer(unsigned long ip) 765 { 766 struct bpf_ksym *ksym; 767 unsigned long offset; 768 769 guard(rcu)(); 770 771 ksym = bpf_ksym_find(ip); 772 if (!ksym || !ksym->fp_start || !ksym->fp_end) 773 return false; 774 775 offset = ip - ksym->start; 776 777 return offset >= ksym->fp_start && offset < ksym->fp_end; 778 } 779 780 const struct exception_table_entry *search_bpf_extables(unsigned long addr) 781 { 782 const struct exception_table_entry *e = NULL; 783 struct bpf_prog *prog; 784 785 rcu_read_lock(); 786 prog = bpf_prog_ksym_find(addr); 787 if (!prog) 788 goto out; 789 if (!prog->aux->num_exentries) 790 goto out; 791 792 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr); 793 out: 794 rcu_read_unlock(); 795 return e; 796 } 797 798 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 799 char *sym) 800 { 801 struct bpf_ksym *ksym; 802 unsigned int it = 0; 803 int ret = -ERANGE; 804 805 if (!bpf_jit_kallsyms_enabled()) 806 return ret; 807 808 rcu_read_lock(); 809 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) { 810 if (it++ != symnum) 811 continue; 812 813 strscpy(sym, ksym->name, KSYM_NAME_LEN); 814 815 *value = ksym->start; 816 *type = BPF_SYM_ELF_TYPE; 817 818 ret = 0; 819 break; 820 } 821 rcu_read_unlock(); 822 823 return ret; 824 } 825 826 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, 827 struct bpf_jit_poke_descriptor *poke) 828 { 829 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 830 static const u32 poke_tab_max = 1024; 831 u32 slot = prog->aux->size_poke_tab; 832 u32 size = slot + 1; 833 834 if (size > poke_tab_max) 835 return -ENOSPC; 836 if (poke->tailcall_target || poke->tailcall_target_stable || 837 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr) 838 return -EINVAL; 839 840 switch (poke->reason) { 841 case BPF_POKE_REASON_TAIL_CALL: 842 if (!poke->tail_call.map) 843 return -EINVAL; 844 break; 845 default: 846 return -EINVAL; 847 } 848 849 tab = krealloc_array(tab, size, sizeof(*poke), GFP_KERNEL); 850 if (!tab) 851 return -ENOMEM; 852 853 memcpy(&tab[slot], poke, sizeof(*poke)); 854 prog->aux->size_poke_tab = size; 855 prog->aux->poke_tab = tab; 856 857 return slot; 858 } 859 860 /* 861 * BPF program pack allocator. 862 * 863 * Most BPF programs are pretty small. Allocating a hole page for each 864 * program is sometime a waste. Many small bpf program also adds pressure 865 * to instruction TLB. To solve this issue, we introduce a BPF program pack 866 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86) 867 * to host BPF programs. 868 */ 869 #define BPF_PROG_CHUNK_SHIFT 6 870 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT) 871 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1)) 872 873 struct bpf_prog_pack { 874 struct list_head list; 875 void *ptr; 876 unsigned long bitmap[]; 877 }; 878 879 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size) 880 { 881 memset(area, 0, size); 882 } 883 884 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE) 885 886 static DEFINE_MUTEX(pack_mutex); 887 static LIST_HEAD(pack_list); 888 889 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with 890 * CONFIG_MMU=n. Use PAGE_SIZE in these cases. 891 */ 892 #ifdef PMD_SIZE 893 /* PMD_SIZE is really big for some archs. It doesn't make sense to 894 * reserve too much memory in one allocation. 
Hardcode BPF_PROG_PACK_SIZE to 895 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be 896 * greater than or equal to 2MB. 897 */ 898 #define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes()) 899 #else 900 #define BPF_PROG_PACK_SIZE PAGE_SIZE 901 #endif 902 903 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE) 904 905 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns) 906 { 907 struct bpf_prog_pack *pack; 908 int err; 909 910 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)), 911 GFP_KERNEL); 912 if (!pack) 913 return NULL; 914 pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE); 915 if (!pack->ptr) 916 goto out; 917 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE); 918 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE); 919 920 set_vm_flush_reset_perms(pack->ptr); 921 err = set_memory_rox((unsigned long)pack->ptr, 922 BPF_PROG_PACK_SIZE / PAGE_SIZE); 923 if (err) 924 goto out; 925 list_add_tail(&pack->list, &pack_list); 926 return pack; 927 928 out: 929 bpf_jit_free_exec(pack->ptr); 930 kfree(pack); 931 return NULL; 932 } 933 934 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns) 935 { 936 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size); 937 struct bpf_prog_pack *pack; 938 unsigned long pos; 939 void *ptr = NULL; 940 941 mutex_lock(&pack_mutex); 942 if (size > BPF_PROG_PACK_SIZE) { 943 size = round_up(size, PAGE_SIZE); 944 ptr = bpf_jit_alloc_exec(size); 945 if (ptr) { 946 int err; 947 948 bpf_fill_ill_insns(ptr, size); 949 set_vm_flush_reset_perms(ptr); 950 err = set_memory_rox((unsigned long)ptr, 951 size / PAGE_SIZE); 952 if (err) { 953 bpf_jit_free_exec(ptr); 954 ptr = NULL; 955 } 956 } 957 goto out; 958 } 959 list_for_each_entry(pack, &pack_list, list) { 960 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, 961 nbits, 0); 962 if (pos < BPF_PROG_CHUNK_COUNT) 963 goto found_free_area; 964 } 965 966 pack = alloc_new_pack(bpf_fill_ill_insns); 967 if (!pack) 968 goto out; 969 970 pos = 0; 971 972 found_free_area: 973 bitmap_set(pack->bitmap, pos, nbits); 974 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT); 975 976 out: 977 mutex_unlock(&pack_mutex); 978 return ptr; 979 } 980 981 void bpf_prog_pack_free(void *ptr, u32 size) 982 { 983 struct bpf_prog_pack *pack = NULL, *tmp; 984 unsigned int nbits; 985 unsigned long pos; 986 987 mutex_lock(&pack_mutex); 988 if (size > BPF_PROG_PACK_SIZE) { 989 bpf_jit_free_exec(ptr); 990 goto out; 991 } 992 993 list_for_each_entry(tmp, &pack_list, list) { 994 if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) { 995 pack = tmp; 996 break; 997 } 998 } 999 1000 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n")) 1001 goto out; 1002 1003 nbits = BPF_PROG_SIZE_TO_NBITS(size); 1004 pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT; 1005 1006 WARN_ONCE(bpf_arch_text_invalidate(ptr, size), 1007 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n"); 1008 1009 bitmap_clear(pack->bitmap, pos, nbits); 1010 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, 1011 BPF_PROG_CHUNK_COUNT, 0) == 0) { 1012 list_del(&pack->list); 1013 bpf_jit_free_exec(pack->ptr); 1014 kfree(pack); 1015 } 1016 out: 1017 mutex_unlock(&pack_mutex); 1018 } 1019 1020 static atomic_long_t bpf_jit_current; 1021 1022 /* Can be overridden by an arch's JIT compiler if it has a custom, 1023 * dedicated BPF backend memory area, or if neither of the two 1024 * below apply. 
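 *
 * The value returned here only feeds the charging heuristic below:
 * the default bpf_jit_limit is half of it, rounded up to a page.
 * E.g. (made-up number) a 128 MiB region yields a 64 MiB default
 * limit for JIT allocations by non-bpf_capable() users.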
1025 */ 1026 u64 __weak bpf_jit_alloc_exec_limit(void) 1027 { 1028 #if defined(MODULES_VADDR) 1029 return MODULES_END - MODULES_VADDR; 1030 #else 1031 return VMALLOC_END - VMALLOC_START; 1032 #endif 1033 } 1034 1035 static int __init bpf_jit_charge_init(void) 1036 { 1037 /* Only used as heuristic here to derive limit. */ 1038 bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); 1039 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1, 1040 PAGE_SIZE), LONG_MAX); 1041 return 0; 1042 } 1043 pure_initcall(bpf_jit_charge_init); 1044 1045 int bpf_jit_charge_modmem(u32 size) 1046 { 1047 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) { 1048 if (!bpf_capable()) { 1049 atomic_long_sub(size, &bpf_jit_current); 1050 return -EPERM; 1051 } 1052 } 1053 1054 return 0; 1055 } 1056 1057 void bpf_jit_uncharge_modmem(u32 size) 1058 { 1059 atomic_long_sub(size, &bpf_jit_current); 1060 } 1061 1062 void *__weak bpf_jit_alloc_exec(unsigned long size) 1063 { 1064 return execmem_alloc(EXECMEM_BPF, size); 1065 } 1066 1067 void __weak bpf_jit_free_exec(void *addr) 1068 { 1069 execmem_free(addr); 1070 } 1071 1072 struct bpf_binary_header * 1073 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, 1074 unsigned int alignment, 1075 bpf_jit_fill_hole_t bpf_fill_ill_insns) 1076 { 1077 struct bpf_binary_header *hdr; 1078 u32 size, hole, start; 1079 1080 WARN_ON_ONCE(!is_power_of_2(alignment) || 1081 alignment > BPF_IMAGE_ALIGNMENT); 1082 1083 /* Most of BPF filters are really small, but if some of them 1084 * fill a page, allow at least 128 extra bytes to insert a 1085 * random section of illegal instructions. 1086 */ 1087 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); 1088 1089 if (bpf_jit_charge_modmem(size)) 1090 return NULL; 1091 hdr = bpf_jit_alloc_exec(size); 1092 if (!hdr) { 1093 bpf_jit_uncharge_modmem(size); 1094 return NULL; 1095 } 1096 1097 /* Fill space with illegal/arch-dep instructions. */ 1098 bpf_fill_ill_insns(hdr, size); 1099 1100 hdr->size = size; 1101 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), 1102 PAGE_SIZE - sizeof(*hdr)); 1103 start = get_random_u32_below(hole) & ~(alignment - 1); 1104 1105 /* Leave a random number of instructions before BPF code. */ 1106 *image_ptr = &hdr->image[start]; 1107 1108 return hdr; 1109 } 1110 1111 void bpf_jit_binary_free(struct bpf_binary_header *hdr) 1112 { 1113 u32 size = hdr->size; 1114 1115 bpf_jit_free_exec(hdr); 1116 bpf_jit_uncharge_modmem(size); 1117 } 1118 1119 /* Allocate jit binary from bpf_prog_pack allocator. 1120 * Since the allocated memory is RO+X, the JIT engine cannot write directly 1121 * to the memory. To solve this problem, a RW buffer is also allocated at 1122 * as the same time. The JIT engine should calculate offsets based on the 1123 * RO memory address, but write JITed program to the RW buffer. Once the 1124 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies 1125 * the JITed program to the RO memory. 
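 *
 * Sketch of the expected calling sequence in a JIT backend (names of
 * the local variables are illustrative only):
 *
 *   header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *                                      &rw_header, &rw_image, fill_insns);
 *   ... emit code into rw_image, computing offsets against image ...
 *   if (bpf_jit_binary_pack_finalize(header, rw_header))
 *           goto out;       (rw_header is already freed on error)
 *   prog->bpf_func = (void *) image;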
1126 */ 1127 struct bpf_binary_header * 1128 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr, 1129 unsigned int alignment, 1130 struct bpf_binary_header **rw_header, 1131 u8 **rw_image, 1132 bpf_jit_fill_hole_t bpf_fill_ill_insns) 1133 { 1134 struct bpf_binary_header *ro_header; 1135 u32 size, hole, start; 1136 1137 WARN_ON_ONCE(!is_power_of_2(alignment) || 1138 alignment > BPF_IMAGE_ALIGNMENT); 1139 1140 /* add 16 bytes for a random section of illegal instructions */ 1141 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE); 1142 1143 if (bpf_jit_charge_modmem(size)) 1144 return NULL; 1145 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns); 1146 if (!ro_header) { 1147 bpf_jit_uncharge_modmem(size); 1148 return NULL; 1149 } 1150 1151 *rw_header = kvmalloc(size, GFP_KERNEL); 1152 if (!*rw_header) { 1153 bpf_prog_pack_free(ro_header, size); 1154 bpf_jit_uncharge_modmem(size); 1155 return NULL; 1156 } 1157 1158 /* Fill space with illegal/arch-dep instructions. */ 1159 bpf_fill_ill_insns(*rw_header, size); 1160 (*rw_header)->size = size; 1161 1162 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)), 1163 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header)); 1164 start = get_random_u32_below(hole) & ~(alignment - 1); 1165 1166 *image_ptr = &ro_header->image[start]; 1167 *rw_image = &(*rw_header)->image[start]; 1168 1169 return ro_header; 1170 } 1171 1172 /* Copy JITed text from rw_header to its final location, the ro_header. */ 1173 int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header, 1174 struct bpf_binary_header *rw_header) 1175 { 1176 void *ptr; 1177 1178 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size); 1179 1180 kvfree(rw_header); 1181 1182 if (IS_ERR(ptr)) { 1183 bpf_prog_pack_free(ro_header, ro_header->size); 1184 return PTR_ERR(ptr); 1185 } 1186 return 0; 1187 } 1188 1189 /* bpf_jit_binary_pack_free is called in two different scenarios: 1190 * 1) when the program is freed after; 1191 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize). 1192 * For case 2), we need to free both the RO memory and the RW buffer. 1193 * 1194 * bpf_jit_binary_pack_free requires proper ro_header->size. However, 1195 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size 1196 * must be set with either bpf_jit_binary_pack_finalize (normal path) or 1197 * bpf_arch_text_copy (when jit fails). 1198 */ 1199 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, 1200 struct bpf_binary_header *rw_header) 1201 { 1202 u32 size = ro_header->size; 1203 1204 bpf_prog_pack_free(ro_header, size); 1205 kvfree(rw_header); 1206 bpf_jit_uncharge_modmem(size); 1207 } 1208 1209 struct bpf_binary_header * 1210 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp) 1211 { 1212 unsigned long real_start = (unsigned long)fp->bpf_func; 1213 unsigned long addr; 1214 1215 addr = real_start & BPF_PROG_CHUNK_MASK; 1216 return (void *)addr; 1217 } 1218 1219 static inline struct bpf_binary_header * 1220 bpf_jit_binary_hdr(const struct bpf_prog *fp) 1221 { 1222 unsigned long real_start = (unsigned long)fp->bpf_func; 1223 unsigned long addr; 1224 1225 addr = real_start & PAGE_MASK; 1226 return (void *)addr; 1227 } 1228 1229 /* This symbol is only overridden by archs that have different 1230 * requirements than the usual eBPF JITs, f.e. when they only 1231 * implement cBPF JIT, do not set images read-only, etc. 
1232 */ 1233 void __weak bpf_jit_free(struct bpf_prog *fp) 1234 { 1235 if (fp->jited) { 1236 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); 1237 1238 bpf_jit_binary_free(hdr); 1239 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); 1240 } 1241 1242 bpf_prog_unlock_free(fp); 1243 } 1244 1245 int bpf_jit_get_func_addr(const struct bpf_prog *prog, 1246 const struct bpf_insn *insn, bool extra_pass, 1247 u64 *func_addr, bool *func_addr_fixed) 1248 { 1249 s16 off = insn->off; 1250 s32 imm = insn->imm; 1251 u8 *addr; 1252 int err; 1253 1254 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL; 1255 if (!*func_addr_fixed) { 1256 /* Place-holder address till the last pass has collected 1257 * all addresses for JITed subprograms in which case we 1258 * can pick them up from prog->aux. 1259 */ 1260 if (!extra_pass) 1261 addr = NULL; 1262 else if (prog->aux->func && 1263 off >= 0 && off < prog->aux->real_func_cnt) 1264 addr = (u8 *)prog->aux->func[off]->bpf_func; 1265 else 1266 return -EINVAL; 1267 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && 1268 bpf_jit_supports_far_kfunc_call()) { 1269 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr); 1270 if (err) 1271 return err; 1272 } else { 1273 /* Address of a BPF helper call. Since part of the core 1274 * kernel, it's always at a fixed location. __bpf_call_base 1275 * and the helper with imm relative to it are both in core 1276 * kernel. 1277 */ 1278 addr = (u8 *)__bpf_call_base + imm; 1279 } 1280 1281 *func_addr = (unsigned long)addr; 1282 return 0; 1283 } 1284 1285 const char *bpf_jit_get_prog_name(struct bpf_prog *prog) 1286 { 1287 if (prog->aux->ksym.prog) 1288 return prog->aux->ksym.name; 1289 return prog->aux->name; 1290 } 1291 1292 static int bpf_jit_blind_insn(const struct bpf_insn *from, 1293 const struct bpf_insn *aux, 1294 struct bpf_insn *to_buff, 1295 bool emit_zext) 1296 { 1297 struct bpf_insn *to = to_buff; 1298 u32 imm_rnd = get_random_u32(); 1299 s16 off; 1300 1301 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); 1302 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); 1303 1304 /* Constraints on AX register: 1305 * 1306 * AX register is inaccessible from user space. It is mapped in 1307 * all JITs, and used here for constant blinding rewrites. It is 1308 * typically "stateless" meaning its contents are only valid within 1309 * the executed instruction, but not across several instructions. 1310 * There are a few exceptions however which are further detailed 1311 * below. 1312 * 1313 * Constant blinding is only used by JITs, not in the interpreter. 1314 * The interpreter uses AX in some occasions as a local temporary 1315 * register e.g. in DIV or MOD instructions. 1316 * 1317 * In restricted circumstances, the verifier can also use the AX 1318 * register for rewrites as long as they do not interfere with 1319 * the above cases! 
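 *
 * Illustration of the rewrite done below (the random value is per
 * instruction and made up here):
 *
 *   r2 += 0x1234    becomes    AX  = 0x1234 ^ rnd
 *                              AX ^= rnd
 *                              r2 += AX
 *
 * so the original constant never appears as an immediate in the
 * JITed image.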
1320 */ 1321 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) 1322 goto out; 1323 1324 if (from->imm == 0 && 1325 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || 1326 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { 1327 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); 1328 goto out; 1329 } 1330 1331 switch (from->code) { 1332 case BPF_ALU | BPF_ADD | BPF_K: 1333 case BPF_ALU | BPF_SUB | BPF_K: 1334 case BPF_ALU | BPF_AND | BPF_K: 1335 case BPF_ALU | BPF_OR | BPF_K: 1336 case BPF_ALU | BPF_XOR | BPF_K: 1337 case BPF_ALU | BPF_MUL | BPF_K: 1338 case BPF_ALU | BPF_MOV | BPF_K: 1339 case BPF_ALU | BPF_DIV | BPF_K: 1340 case BPF_ALU | BPF_MOD | BPF_K: 1341 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1342 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1343 *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off); 1344 break; 1345 1346 case BPF_ALU64 | BPF_ADD | BPF_K: 1347 case BPF_ALU64 | BPF_SUB | BPF_K: 1348 case BPF_ALU64 | BPF_AND | BPF_K: 1349 case BPF_ALU64 | BPF_OR | BPF_K: 1350 case BPF_ALU64 | BPF_XOR | BPF_K: 1351 case BPF_ALU64 | BPF_MUL | BPF_K: 1352 case BPF_ALU64 | BPF_MOV | BPF_K: 1353 case BPF_ALU64 | BPF_DIV | BPF_K: 1354 case BPF_ALU64 | BPF_MOD | BPF_K: 1355 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1356 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1357 *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off); 1358 break; 1359 1360 case BPF_JMP | BPF_JEQ | BPF_K: 1361 case BPF_JMP | BPF_JNE | BPF_K: 1362 case BPF_JMP | BPF_JGT | BPF_K: 1363 case BPF_JMP | BPF_JLT | BPF_K: 1364 case BPF_JMP | BPF_JGE | BPF_K: 1365 case BPF_JMP | BPF_JLE | BPF_K: 1366 case BPF_JMP | BPF_JSGT | BPF_K: 1367 case BPF_JMP | BPF_JSLT | BPF_K: 1368 case BPF_JMP | BPF_JSGE | BPF_K: 1369 case BPF_JMP | BPF_JSLE | BPF_K: 1370 case BPF_JMP | BPF_JSET | BPF_K: 1371 /* Accommodate for extra offset in case of a backjump. */ 1372 off = from->off; 1373 if (off < 0) 1374 off -= 2; 1375 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1376 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1377 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); 1378 break; 1379 1380 case BPF_JMP32 | BPF_JEQ | BPF_K: 1381 case BPF_JMP32 | BPF_JNE | BPF_K: 1382 case BPF_JMP32 | BPF_JGT | BPF_K: 1383 case BPF_JMP32 | BPF_JLT | BPF_K: 1384 case BPF_JMP32 | BPF_JGE | BPF_K: 1385 case BPF_JMP32 | BPF_JLE | BPF_K: 1386 case BPF_JMP32 | BPF_JSGT | BPF_K: 1387 case BPF_JMP32 | BPF_JSLT | BPF_K: 1388 case BPF_JMP32 | BPF_JSGE | BPF_K: 1389 case BPF_JMP32 | BPF_JSLE | BPF_K: 1390 case BPF_JMP32 | BPF_JSET | BPF_K: 1391 /* Accommodate for extra offset in case of a backjump. */ 1392 off = from->off; 1393 if (off < 0) 1394 off -= 2; 1395 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1396 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1397 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, 1398 off); 1399 break; 1400 1401 case BPF_LD | BPF_IMM | BPF_DW: 1402 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); 1403 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1404 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 1405 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX); 1406 break; 1407 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. 
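	 * The first part (handled above) blinded the upper 32 bits of
	 * the 64-bit immediate and shifted them into place; this part
	 * blinds the lower 32 bits (aux[0].imm) and ORs them into the
	 * destination, zero-extending first when the JIT relies on
	 * explicit zero-extension.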
*/ 1408 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1409 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1410 if (emit_zext) 1411 *to++ = BPF_ZEXT_REG(BPF_REG_AX); 1412 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1413 break; 1414 1415 case BPF_ST | BPF_MEM | BPF_DW: 1416 case BPF_ST | BPF_MEM | BPF_W: 1417 case BPF_ST | BPF_MEM | BPF_H: 1418 case BPF_ST | BPF_MEM | BPF_B: 1419 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1420 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1421 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); 1422 break; 1423 } 1424 out: 1425 return to - to_buff; 1426 } 1427 1428 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, 1429 gfp_t gfp_extra_flags) 1430 { 1431 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 1432 struct bpf_prog *fp; 1433 1434 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); 1435 if (fp != NULL) { 1436 /* aux->prog still points to the fp_other one, so 1437 * when promoting the clone to the real program, 1438 * this still needs to be adapted. 1439 */ 1440 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); 1441 } 1442 1443 return fp; 1444 } 1445 1446 static void bpf_prog_clone_free(struct bpf_prog *fp) 1447 { 1448 /* aux was stolen by the other clone, so we cannot free 1449 * it from this path! It will be freed eventually by the 1450 * other program on release. 1451 * 1452 * At this point, we don't need a deferred release since 1453 * clone is guaranteed to not be locked. 1454 */ 1455 fp->aux = NULL; 1456 fp->stats = NULL; 1457 fp->active = NULL; 1458 __bpf_prog_free(fp); 1459 } 1460 1461 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) 1462 { 1463 /* We have to repoint aux->prog to self, as we don't 1464 * know whether fp here is the clone or the original. 1465 */ 1466 fp->aux->prog = fp; 1467 bpf_prog_clone_free(fp_other); 1468 } 1469 1470 static void adjust_insn_arrays(struct bpf_prog *prog, u32 off, u32 len) 1471 { 1472 #ifdef CONFIG_BPF_SYSCALL 1473 struct bpf_map *map; 1474 int i; 1475 1476 if (len <= 1) 1477 return; 1478 1479 for (i = 0; i < prog->aux->used_map_cnt; i++) { 1480 map = prog->aux->used_maps[i]; 1481 if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY) 1482 bpf_insn_array_adjust(map, off, len); 1483 } 1484 #endif 1485 } 1486 1487 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) 1488 { 1489 struct bpf_insn insn_buff[16], aux[2]; 1490 struct bpf_prog *clone, *tmp; 1491 int insn_delta, insn_cnt; 1492 struct bpf_insn *insn; 1493 int i, rewritten; 1494 1495 if (!prog->blinding_requested || prog->blinded) 1496 return prog; 1497 1498 clone = bpf_prog_clone_create(prog, GFP_USER); 1499 if (!clone) 1500 return ERR_PTR(-ENOMEM); 1501 1502 insn_cnt = clone->len; 1503 insn = clone->insnsi; 1504 1505 for (i = 0; i < insn_cnt; i++, insn++) { 1506 if (bpf_pseudo_func(insn)) { 1507 /* ld_imm64 with an address of bpf subprog is not 1508 * a user controlled constant. Don't randomize it, 1509 * since it will conflict with jit_subprogs() logic. 1510 */ 1511 insn++; 1512 i++; 1513 continue; 1514 } 1515 1516 /* We temporarily need to hold the original ld64 insn 1517 * so that we can still access the first part in the 1518 * second blinding run. 
1519 */ 1520 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) && 1521 insn[1].code == 0) 1522 memcpy(aux, insn, sizeof(aux)); 1523 1524 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, 1525 clone->aux->verifier_zext); 1526 if (!rewritten) 1527 continue; 1528 1529 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); 1530 if (IS_ERR(tmp)) { 1531 /* Patching may have repointed aux->prog during 1532 * realloc from the original one, so we need to 1533 * fix it up here on error. 1534 */ 1535 bpf_jit_prog_release_other(prog, clone); 1536 return tmp; 1537 } 1538 1539 clone = tmp; 1540 insn_delta = rewritten - 1; 1541 1542 /* Instructions arrays must be updated using absolute xlated offsets */ 1543 adjust_insn_arrays(clone, prog->aux->subprog_start + i, rewritten); 1544 1545 /* Walk new program and skip insns we just inserted. */ 1546 insn = clone->insnsi + i + insn_delta; 1547 insn_cnt += insn_delta; 1548 i += insn_delta; 1549 } 1550 1551 clone->blinded = 1; 1552 return clone; 1553 } 1554 #endif /* CONFIG_BPF_JIT */ 1555 1556 /* Base function for offset calculation. Needs to go into .text section, 1557 * therefore keeping it non-static as well; will also be used by JITs 1558 * anyway later on, so do not let the compiler omit it. This also needs 1559 * to go into kallsyms for correlation from e.g. bpftool, so naming 1560 * must not change. 1561 */ 1562 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 1563 { 1564 return 0; 1565 } 1566 EXPORT_SYMBOL_GPL(__bpf_call_base); 1567 1568 /* All UAPI available opcodes. */ 1569 #define BPF_INSN_MAP(INSN_2, INSN_3) \ 1570 /* 32 bit ALU operations. */ \ 1571 /* Register based. */ \ 1572 INSN_3(ALU, ADD, X), \ 1573 INSN_3(ALU, SUB, X), \ 1574 INSN_3(ALU, AND, X), \ 1575 INSN_3(ALU, OR, X), \ 1576 INSN_3(ALU, LSH, X), \ 1577 INSN_3(ALU, RSH, X), \ 1578 INSN_3(ALU, XOR, X), \ 1579 INSN_3(ALU, MUL, X), \ 1580 INSN_3(ALU, MOV, X), \ 1581 INSN_3(ALU, ARSH, X), \ 1582 INSN_3(ALU, DIV, X), \ 1583 INSN_3(ALU, MOD, X), \ 1584 INSN_2(ALU, NEG), \ 1585 INSN_3(ALU, END, TO_BE), \ 1586 INSN_3(ALU, END, TO_LE), \ 1587 /* Immediate based. */ \ 1588 INSN_3(ALU, ADD, K), \ 1589 INSN_3(ALU, SUB, K), \ 1590 INSN_3(ALU, AND, K), \ 1591 INSN_3(ALU, OR, K), \ 1592 INSN_3(ALU, LSH, K), \ 1593 INSN_3(ALU, RSH, K), \ 1594 INSN_3(ALU, XOR, K), \ 1595 INSN_3(ALU, MUL, K), \ 1596 INSN_3(ALU, MOV, K), \ 1597 INSN_3(ALU, ARSH, K), \ 1598 INSN_3(ALU, DIV, K), \ 1599 INSN_3(ALU, MOD, K), \ 1600 /* 64 bit ALU operations. */ \ 1601 /* Register based. */ \ 1602 INSN_3(ALU64, ADD, X), \ 1603 INSN_3(ALU64, SUB, X), \ 1604 INSN_3(ALU64, AND, X), \ 1605 INSN_3(ALU64, OR, X), \ 1606 INSN_3(ALU64, LSH, X), \ 1607 INSN_3(ALU64, RSH, X), \ 1608 INSN_3(ALU64, XOR, X), \ 1609 INSN_3(ALU64, MUL, X), \ 1610 INSN_3(ALU64, MOV, X), \ 1611 INSN_3(ALU64, ARSH, X), \ 1612 INSN_3(ALU64, DIV, X), \ 1613 INSN_3(ALU64, MOD, X), \ 1614 INSN_2(ALU64, NEG), \ 1615 INSN_3(ALU64, END, TO_LE), \ 1616 /* Immediate based. */ \ 1617 INSN_3(ALU64, ADD, K), \ 1618 INSN_3(ALU64, SUB, K), \ 1619 INSN_3(ALU64, AND, K), \ 1620 INSN_3(ALU64, OR, K), \ 1621 INSN_3(ALU64, LSH, K), \ 1622 INSN_3(ALU64, RSH, K), \ 1623 INSN_3(ALU64, XOR, K), \ 1624 INSN_3(ALU64, MUL, K), \ 1625 INSN_3(ALU64, MOV, K), \ 1626 INSN_3(ALU64, ARSH, K), \ 1627 INSN_3(ALU64, DIV, K), \ 1628 INSN_3(ALU64, MOD, K), \ 1629 /* Call instruction. */ \ 1630 INSN_2(JMP, CALL), \ 1631 /* Exit instruction. */ \ 1632 INSN_2(JMP, EXIT), \ 1633 /* 32-bit Jump instructions. */ \ 1634 /* Register based. 
*/ \ 1635 INSN_3(JMP32, JEQ, X), \ 1636 INSN_3(JMP32, JNE, X), \ 1637 INSN_3(JMP32, JGT, X), \ 1638 INSN_3(JMP32, JLT, X), \ 1639 INSN_3(JMP32, JGE, X), \ 1640 INSN_3(JMP32, JLE, X), \ 1641 INSN_3(JMP32, JSGT, X), \ 1642 INSN_3(JMP32, JSLT, X), \ 1643 INSN_3(JMP32, JSGE, X), \ 1644 INSN_3(JMP32, JSLE, X), \ 1645 INSN_3(JMP32, JSET, X), \ 1646 /* Immediate based. */ \ 1647 INSN_3(JMP32, JEQ, K), \ 1648 INSN_3(JMP32, JNE, K), \ 1649 INSN_3(JMP32, JGT, K), \ 1650 INSN_3(JMP32, JLT, K), \ 1651 INSN_3(JMP32, JGE, K), \ 1652 INSN_3(JMP32, JLE, K), \ 1653 INSN_3(JMP32, JSGT, K), \ 1654 INSN_3(JMP32, JSLT, K), \ 1655 INSN_3(JMP32, JSGE, K), \ 1656 INSN_3(JMP32, JSLE, K), \ 1657 INSN_3(JMP32, JSET, K), \ 1658 /* Jump instructions. */ \ 1659 /* Register based. */ \ 1660 INSN_3(JMP, JEQ, X), \ 1661 INSN_3(JMP, JNE, X), \ 1662 INSN_3(JMP, JGT, X), \ 1663 INSN_3(JMP, JLT, X), \ 1664 INSN_3(JMP, JGE, X), \ 1665 INSN_3(JMP, JLE, X), \ 1666 INSN_3(JMP, JSGT, X), \ 1667 INSN_3(JMP, JSLT, X), \ 1668 INSN_3(JMP, JSGE, X), \ 1669 INSN_3(JMP, JSLE, X), \ 1670 INSN_3(JMP, JSET, X), \ 1671 /* Immediate based. */ \ 1672 INSN_3(JMP, JEQ, K), \ 1673 INSN_3(JMP, JNE, K), \ 1674 INSN_3(JMP, JGT, K), \ 1675 INSN_3(JMP, JLT, K), \ 1676 INSN_3(JMP, JGE, K), \ 1677 INSN_3(JMP, JLE, K), \ 1678 INSN_3(JMP, JSGT, K), \ 1679 INSN_3(JMP, JSLT, K), \ 1680 INSN_3(JMP, JSGE, K), \ 1681 INSN_3(JMP, JSLE, K), \ 1682 INSN_3(JMP, JSET, K), \ 1683 INSN_2(JMP, JA), \ 1684 INSN_2(JMP32, JA), \ 1685 /* Atomic operations. */ \ 1686 INSN_3(STX, ATOMIC, B), \ 1687 INSN_3(STX, ATOMIC, H), \ 1688 INSN_3(STX, ATOMIC, W), \ 1689 INSN_3(STX, ATOMIC, DW), \ 1690 /* Store instructions. */ \ 1691 /* Register based. */ \ 1692 INSN_3(STX, MEM, B), \ 1693 INSN_3(STX, MEM, H), \ 1694 INSN_3(STX, MEM, W), \ 1695 INSN_3(STX, MEM, DW), \ 1696 /* Immediate based. */ \ 1697 INSN_3(ST, MEM, B), \ 1698 INSN_3(ST, MEM, H), \ 1699 INSN_3(ST, MEM, W), \ 1700 INSN_3(ST, MEM, DW), \ 1701 /* Load instructions. */ \ 1702 /* Register based. */ \ 1703 INSN_3(LDX, MEM, B), \ 1704 INSN_3(LDX, MEM, H), \ 1705 INSN_3(LDX, MEM, W), \ 1706 INSN_3(LDX, MEM, DW), \ 1707 INSN_3(LDX, MEMSX, B), \ 1708 INSN_3(LDX, MEMSX, H), \ 1709 INSN_3(LDX, MEMSX, W), \ 1710 /* Immediate based. */ \ 1711 INSN_3(LD, IMM, DW) 1712 1713 bool bpf_opcode_in_insntable(u8 code) 1714 { 1715 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true 1716 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true 1717 static const bool public_insntable[256] = { 1718 [0 ... 255] = false, 1719 /* Now overwrite non-defaults ... */ 1720 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL), 1721 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */ 1722 [BPF_LD | BPF_ABS | BPF_B] = true, 1723 [BPF_LD | BPF_ABS | BPF_H] = true, 1724 [BPF_LD | BPF_ABS | BPF_W] = true, 1725 [BPF_LD | BPF_IND | BPF_B] = true, 1726 [BPF_LD | BPF_IND | BPF_H] = true, 1727 [BPF_LD | BPF_IND | BPF_W] = true, 1728 [BPF_JMP | BPF_JA | BPF_X] = true, 1729 [BPF_JMP | BPF_JCOND] = true, 1730 }; 1731 #undef BPF_INSN_3_TBL 1732 #undef BPF_INSN_2_TBL 1733 return public_insntable[code]; 1734 } 1735 1736 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1737 /** 1738 * ___bpf_prog_run - run eBPF program on a given context 1739 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers 1740 * @insn: is the array of eBPF instructions 1741 * 1742 * Decode and execute eBPF instructions. 
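 * Dispatch is (roughly) a computed goto through a 256-entry jump
 * table indexed by insn->code; each handler ends in CONT, i.e.
 * "insn++; goto select_insn".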
1743 * 1744 * Return: whatever value is in %BPF_R0 at program exit 1745 */ 1746 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn) 1747 { 1748 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y 1749 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z 1750 static const void * const jumptable[256] __annotate_jump_table = { 1751 [0 ... 255] = &&default_label, 1752 /* Now overwrite non-defaults ... */ 1753 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL), 1754 /* Non-UAPI available opcodes. */ 1755 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, 1756 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, 1757 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC, 1758 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B, 1759 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H, 1760 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W, 1761 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW, 1762 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B, 1763 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H, 1764 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W, 1765 }; 1766 #undef BPF_INSN_3_LBL 1767 #undef BPF_INSN_2_LBL 1768 u32 tail_call_cnt = 0; 1769 1770 #define CONT ({ insn++; goto select_insn; }) 1771 #define CONT_JMP ({ insn++; goto select_insn; }) 1772 1773 select_insn: 1774 goto *jumptable[insn->code]; 1775 1776 /* Explicitly mask the register-based shift amounts with 63 or 31 1777 * to avoid undefined behavior. Normally this won't affect the 1778 * generated code, for example, in case of native 64 bit archs such 1779 * as x86-64 or arm64, the compiler is optimizing the AND away for 1780 * the interpreter. In case of JITs, each of the JIT backends compiles 1781 * the BPF shift operations to machine instructions which produce 1782 * implementation-defined results in such a case; the resulting 1783 * contents of the register may be arbitrary, but program behaviour 1784 * as a whole remains defined. In other words, in case of JIT backends, 1785 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation. 
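 *
 * Worked example (illustrative): a 32-bit LSH by a register value of
 * 40 is computed below as
 *
 *   DST = (u32) DST << ((u32) SRC & 31)      i.e. a shift by 8
 *
 * whereas a JIT emits the machine shift directly and inherits the
 * machine's out-of-range behaviour.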
1786 */ 1787 /* ALU (shifts) */ 1788 #define SHT(OPCODE, OP) \ 1789 ALU64_##OPCODE##_X: \ 1790 DST = DST OP (SRC & 63); \ 1791 CONT; \ 1792 ALU_##OPCODE##_X: \ 1793 DST = (u32) DST OP ((u32) SRC & 31); \ 1794 CONT; \ 1795 ALU64_##OPCODE##_K: \ 1796 DST = DST OP IMM; \ 1797 CONT; \ 1798 ALU_##OPCODE##_K: \ 1799 DST = (u32) DST OP (u32) IMM; \ 1800 CONT; 1801 /* ALU (rest) */ 1802 #define ALU(OPCODE, OP) \ 1803 ALU64_##OPCODE##_X: \ 1804 DST = DST OP SRC; \ 1805 CONT; \ 1806 ALU_##OPCODE##_X: \ 1807 DST = (u32) DST OP (u32) SRC; \ 1808 CONT; \ 1809 ALU64_##OPCODE##_K: \ 1810 DST = DST OP IMM; \ 1811 CONT; \ 1812 ALU_##OPCODE##_K: \ 1813 DST = (u32) DST OP (u32) IMM; \ 1814 CONT; 1815 ALU(ADD, +) 1816 ALU(SUB, -) 1817 ALU(AND, &) 1818 ALU(OR, |) 1819 ALU(XOR, ^) 1820 ALU(MUL, *) 1821 SHT(LSH, <<) 1822 SHT(RSH, >>) 1823 #undef SHT 1824 #undef ALU 1825 ALU_NEG: 1826 DST = (u32) -DST; 1827 CONT; 1828 ALU64_NEG: 1829 DST = -DST; 1830 CONT; 1831 ALU_MOV_X: 1832 switch (OFF) { 1833 case 0: 1834 DST = (u32) SRC; 1835 break; 1836 case 8: 1837 DST = (u32)(s8) SRC; 1838 break; 1839 case 16: 1840 DST = (u32)(s16) SRC; 1841 break; 1842 } 1843 CONT; 1844 ALU_MOV_K: 1845 DST = (u32) IMM; 1846 CONT; 1847 ALU64_MOV_X: 1848 switch (OFF) { 1849 case 0: 1850 DST = SRC; 1851 break; 1852 case 8: 1853 DST = (s8) SRC; 1854 break; 1855 case 16: 1856 DST = (s16) SRC; 1857 break; 1858 case 32: 1859 DST = (s32) SRC; 1860 break; 1861 } 1862 CONT; 1863 ALU64_MOV_K: 1864 DST = IMM; 1865 CONT; 1866 LD_IMM_DW: 1867 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; 1868 insn++; 1869 CONT; 1870 ALU_ARSH_X: 1871 DST = (u64) (u32) (((s32) DST) >> (SRC & 31)); 1872 CONT; 1873 ALU_ARSH_K: 1874 DST = (u64) (u32) (((s32) DST) >> IMM); 1875 CONT; 1876 ALU64_ARSH_X: 1877 (*(s64 *) &DST) >>= (SRC & 63); 1878 CONT; 1879 ALU64_ARSH_K: 1880 (*(s64 *) &DST) >>= IMM; 1881 CONT; 1882 ALU64_MOD_X: 1883 switch (OFF) { 1884 case 0: 1885 div64_u64_rem(DST, SRC, &AX); 1886 DST = AX; 1887 break; 1888 case 1: 1889 AX = div64_s64(DST, SRC); 1890 DST = DST - AX * SRC; 1891 break; 1892 } 1893 CONT; 1894 ALU_MOD_X: 1895 switch (OFF) { 1896 case 0: 1897 AX = (u32) DST; 1898 DST = do_div(AX, (u32) SRC); 1899 break; 1900 case 1: 1901 AX = abs((s32)DST); 1902 AX = do_div(AX, abs((s32)SRC)); 1903 if ((s32)DST < 0) 1904 DST = (u32)-AX; 1905 else 1906 DST = (u32)AX; 1907 break; 1908 } 1909 CONT; 1910 ALU64_MOD_K: 1911 switch (OFF) { 1912 case 0: 1913 div64_u64_rem(DST, IMM, &AX); 1914 DST = AX; 1915 break; 1916 case 1: 1917 AX = div64_s64(DST, IMM); 1918 DST = DST - AX * IMM; 1919 break; 1920 } 1921 CONT; 1922 ALU_MOD_K: 1923 switch (OFF) { 1924 case 0: 1925 AX = (u32) DST; 1926 DST = do_div(AX, (u32) IMM); 1927 break; 1928 case 1: 1929 AX = abs((s32)DST); 1930 AX = do_div(AX, abs((s32)IMM)); 1931 if ((s32)DST < 0) 1932 DST = (u32)-AX; 1933 else 1934 DST = (u32)AX; 1935 break; 1936 } 1937 CONT; 1938 ALU64_DIV_X: 1939 switch (OFF) { 1940 case 0: 1941 DST = div64_u64(DST, SRC); 1942 break; 1943 case 1: 1944 DST = div64_s64(DST, SRC); 1945 break; 1946 } 1947 CONT; 1948 ALU_DIV_X: 1949 switch (OFF) { 1950 case 0: 1951 AX = (u32) DST; 1952 do_div(AX, (u32) SRC); 1953 DST = (u32) AX; 1954 break; 1955 case 1: 1956 AX = abs((s32)DST); 1957 do_div(AX, abs((s32)SRC)); 1958 if (((s32)DST < 0) == ((s32)SRC < 0)) 1959 DST = (u32)AX; 1960 else 1961 DST = (u32)-AX; 1962 break; 1963 } 1964 CONT; 1965 ALU64_DIV_K: 1966 switch (OFF) { 1967 case 0: 1968 DST = div64_u64(DST, IMM); 1969 break; 1970 case 1: 1971 DST = div64_s64(DST, IMM); 1972 break; 1973 } 1974 
CONT; 1975 ALU_DIV_K: 1976 switch (OFF) { 1977 case 0: 1978 AX = (u32) DST; 1979 do_div(AX, (u32) IMM); 1980 DST = (u32) AX; 1981 break; 1982 case 1: 1983 AX = abs((s32)DST); 1984 do_div(AX, abs((s32)IMM)); 1985 if (((s32)DST < 0) == ((s32)IMM < 0)) 1986 DST = (u32)AX; 1987 else 1988 DST = (u32)-AX; 1989 break; 1990 } 1991 CONT; 1992 ALU_END_TO_BE: 1993 switch (IMM) { 1994 case 16: 1995 DST = (__force u16) cpu_to_be16(DST); 1996 break; 1997 case 32: 1998 DST = (__force u32) cpu_to_be32(DST); 1999 break; 2000 case 64: 2001 DST = (__force u64) cpu_to_be64(DST); 2002 break; 2003 } 2004 CONT; 2005 ALU_END_TO_LE: 2006 switch (IMM) { 2007 case 16: 2008 DST = (__force u16) cpu_to_le16(DST); 2009 break; 2010 case 32: 2011 DST = (__force u32) cpu_to_le32(DST); 2012 break; 2013 case 64: 2014 DST = (__force u64) cpu_to_le64(DST); 2015 break; 2016 } 2017 CONT; 2018 ALU64_END_TO_LE: 2019 switch (IMM) { 2020 case 16: 2021 DST = (__force u16) __swab16(DST); 2022 break; 2023 case 32: 2024 DST = (__force u32) __swab32(DST); 2025 break; 2026 case 64: 2027 DST = (__force u64) __swab64(DST); 2028 break; 2029 } 2030 CONT; 2031 2032 /* CALL */ 2033 JMP_CALL: 2034 /* Function call scratches BPF_R1-BPF_R5 registers, 2035 * preserves BPF_R6-BPF_R9, and stores return value 2036 * into BPF_R0. 2037 */ 2038 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, 2039 BPF_R4, BPF_R5); 2040 CONT; 2041 2042 JMP_CALL_ARGS: 2043 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, 2044 BPF_R3, BPF_R4, 2045 BPF_R5, 2046 insn + insn->off + 1); 2047 CONT; 2048 2049 JMP_TAIL_CALL: { 2050 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; 2051 struct bpf_array *array = container_of(map, struct bpf_array, map); 2052 struct bpf_prog *prog; 2053 u32 index = BPF_R3; 2054 2055 if (unlikely(index >= array->map.max_entries)) 2056 goto out; 2057 2058 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT)) 2059 goto out; 2060 2061 tail_call_cnt++; 2062 2063 prog = READ_ONCE(array->ptrs[index]); 2064 if (!prog) 2065 goto out; 2066 2067 /* ARG1 at this point is guaranteed to point to CTX from 2068 * the verifier side due to the fact that the tail call is 2069 * handled like a helper, that is, bpf_tail_call_proto, 2070 * where arg1_type is ARG_PTR_TO_CTX. 
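		 *
		 * From the program's point of view this corresponds
		 * (illustratively) to bpf_tail_call(ctx, &prog_array, index):
		 * on success, control transfers to the target program reusing
		 * the current stack frame and never returns to the call site;
		 * chains are bounded by MAX_TAIL_CALL_CNT as checked above.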
2071 */ 2072 insn = prog->insnsi; 2073 goto select_insn; 2074 out: 2075 CONT; 2076 } 2077 JMP_JA: 2078 insn += insn->off; 2079 CONT; 2080 JMP32_JA: 2081 insn += insn->imm; 2082 CONT; 2083 JMP_EXIT: 2084 return BPF_R0; 2085 /* JMP */ 2086 #define COND_JMP(SIGN, OPCODE, CMP_OP) \ 2087 JMP_##OPCODE##_X: \ 2088 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \ 2089 insn += insn->off; \ 2090 CONT_JMP; \ 2091 } \ 2092 CONT; \ 2093 JMP32_##OPCODE##_X: \ 2094 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \ 2095 insn += insn->off; \ 2096 CONT_JMP; \ 2097 } \ 2098 CONT; \ 2099 JMP_##OPCODE##_K: \ 2100 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \ 2101 insn += insn->off; \ 2102 CONT_JMP; \ 2103 } \ 2104 CONT; \ 2105 JMP32_##OPCODE##_K: \ 2106 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \ 2107 insn += insn->off; \ 2108 CONT_JMP; \ 2109 } \ 2110 CONT; 2111 COND_JMP(u, JEQ, ==) 2112 COND_JMP(u, JNE, !=) 2113 COND_JMP(u, JGT, >) 2114 COND_JMP(u, JLT, <) 2115 COND_JMP(u, JGE, >=) 2116 COND_JMP(u, JLE, <=) 2117 COND_JMP(u, JSET, &) 2118 COND_JMP(s, JSGT, >) 2119 COND_JMP(s, JSLT, <) 2120 COND_JMP(s, JSGE, >=) 2121 COND_JMP(s, JSLE, <=) 2122 #undef COND_JMP 2123 /* ST, STX and LDX*/ 2124 ST_NOSPEC: 2125 /* Speculation barrier for mitigating Speculative Store Bypass, 2126 * Bounds-Check Bypass and Type Confusion. In case of arm64, we 2127 * rely on the firmware mitigation as controlled via the ssbd 2128 * kernel parameter. Whenever the mitigation is enabled, it 2129 * works for all of the kernel code with no need to provide any 2130 * additional instructions here. In case of x86, we use 'lfence' 2131 * insn for mitigation. We reuse preexisting logic from Spectre 2132 * v1 mitigation that happens to produce the required code on 2133 * x86 for v4 as well. 2134 */ 2135 barrier_nospec(); 2136 CONT; 2137 #define LDST(SIZEOP, SIZE) \ 2138 STX_MEM_##SIZEOP: \ 2139 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ 2140 CONT; \ 2141 ST_MEM_##SIZEOP: \ 2142 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ 2143 CONT; \ 2144 LDX_MEM_##SIZEOP: \ 2145 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 2146 CONT; \ 2147 LDX_PROBE_MEM_##SIZEOP: \ 2148 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \ 2149 (const void *)(long) (SRC + insn->off)); \ 2150 DST = *((SIZE *)&DST); \ 2151 CONT; 2152 2153 LDST(B, u8) 2154 LDST(H, u16) 2155 LDST(W, u32) 2156 LDST(DW, u64) 2157 #undef LDST 2158 2159 #define LDSX(SIZEOP, SIZE) \ 2160 LDX_MEMSX_##SIZEOP: \ 2161 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 2162 CONT; \ 2163 LDX_PROBE_MEMSX_##SIZEOP: \ 2164 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \ 2165 (const void *)(long) (SRC + insn->off)); \ 2166 DST = *((SIZE *)&DST); \ 2167 CONT; 2168 2169 LDSX(B, s8) 2170 LDSX(H, s16) 2171 LDSX(W, s32) 2172 #undef LDSX 2173 2174 #define ATOMIC_ALU_OP(BOP, KOP) \ 2175 case BOP: \ 2176 if (BPF_SIZE(insn->code) == BPF_W) \ 2177 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \ 2178 (DST + insn->off)); \ 2179 else if (BPF_SIZE(insn->code) == BPF_DW) \ 2180 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \ 2181 (DST + insn->off)); \ 2182 else \ 2183 goto default_label; \ 2184 break; \ 2185 case BOP | BPF_FETCH: \ 2186 if (BPF_SIZE(insn->code) == BPF_W) \ 2187 SRC = (u32) atomic_fetch_##KOP( \ 2188 (u32) SRC, \ 2189 (atomic_t *)(unsigned long) (DST + insn->off)); \ 2190 else if (BPF_SIZE(insn->code) == BPF_DW) \ 2191 SRC = (u64) atomic64_fetch_##KOP( \ 2192 (u64) SRC, \ 2193 (atomic64_t *)(unsigned long) (DST + insn->off)); \ 2194 else \ 2195 goto default_label; \ 2196 
break; 2197 2198 STX_ATOMIC_DW: 2199 STX_ATOMIC_W: 2200 STX_ATOMIC_H: 2201 STX_ATOMIC_B: 2202 switch (IMM) { 2203 /* Atomic read-modify-write instructions support only W and DW 2204 * size modifiers. 2205 */ 2206 ATOMIC_ALU_OP(BPF_ADD, add) 2207 ATOMIC_ALU_OP(BPF_AND, and) 2208 ATOMIC_ALU_OP(BPF_OR, or) 2209 ATOMIC_ALU_OP(BPF_XOR, xor) 2210 #undef ATOMIC_ALU_OP 2211 2212 case BPF_XCHG: 2213 if (BPF_SIZE(insn->code) == BPF_W) 2214 SRC = (u32) atomic_xchg( 2215 (atomic_t *)(unsigned long) (DST + insn->off), 2216 (u32) SRC); 2217 else if (BPF_SIZE(insn->code) == BPF_DW) 2218 SRC = (u64) atomic64_xchg( 2219 (atomic64_t *)(unsigned long) (DST + insn->off), 2220 (u64) SRC); 2221 else 2222 goto default_label; 2223 break; 2224 case BPF_CMPXCHG: 2225 if (BPF_SIZE(insn->code) == BPF_W) 2226 BPF_R0 = (u32) atomic_cmpxchg( 2227 (atomic_t *)(unsigned long) (DST + insn->off), 2228 (u32) BPF_R0, (u32) SRC); 2229 else if (BPF_SIZE(insn->code) == BPF_DW) 2230 BPF_R0 = (u64) atomic64_cmpxchg( 2231 (atomic64_t *)(unsigned long) (DST + insn->off), 2232 (u64) BPF_R0, (u64) SRC); 2233 else 2234 goto default_label; 2235 break; 2236 /* Atomic load and store instructions support all size 2237 * modifiers. 2238 */ 2239 case BPF_LOAD_ACQ: 2240 switch (BPF_SIZE(insn->code)) { 2241 #define LOAD_ACQUIRE(SIZEOP, SIZE) \ 2242 case BPF_##SIZEOP: \ 2243 DST = (SIZE)smp_load_acquire( \ 2244 (SIZE *)(unsigned long)(SRC + insn->off)); \ 2245 break; 2246 LOAD_ACQUIRE(B, u8) 2247 LOAD_ACQUIRE(H, u16) 2248 LOAD_ACQUIRE(W, u32) 2249 #ifdef CONFIG_64BIT 2250 LOAD_ACQUIRE(DW, u64) 2251 #endif 2252 #undef LOAD_ACQUIRE 2253 default: 2254 goto default_label; 2255 } 2256 break; 2257 case BPF_STORE_REL: 2258 switch (BPF_SIZE(insn->code)) { 2259 #define STORE_RELEASE(SIZEOP, SIZE) \ 2260 case BPF_##SIZEOP: \ 2261 smp_store_release( \ 2262 (SIZE *)(unsigned long)(DST + insn->off), (SIZE)SRC); \ 2263 break; 2264 STORE_RELEASE(B, u8) 2265 STORE_RELEASE(H, u16) 2266 STORE_RELEASE(W, u32) 2267 #ifdef CONFIG_64BIT 2268 STORE_RELEASE(DW, u64) 2269 #endif 2270 #undef STORE_RELEASE 2271 default: 2272 goto default_label; 2273 } 2274 break; 2275 2276 default: 2277 goto default_label; 2278 } 2279 CONT; 2280 2281 default_label: 2282 /* If we ever reach this, we have a bug somewhere. Die hard here 2283 * instead of just returning 0; we could be somewhere in a subprog, 2284 * so execution could continue otherwise which we do /not/ want. 2285 * 2286 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable(). 
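*
* A rough sketch of that verifier-side filter (assuming the usual check
* shape, shown here for illustration only):
*
*	if (!bpf_opcode_in_insntable(insn->code))
*		return -EINVAL;
*
* so any opcode that still reaches default_label slipped past it and
* indicates an internal bug rather than bad user input.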
2287 */ 2288 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n", 2289 insn->code, insn->imm); 2290 BUG_ON(1); 2291 return 0; 2292 } 2293 2294 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size 2295 #define DEFINE_BPF_PROG_RUN(stack_size) \ 2296 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ 2297 { \ 2298 u64 stack[stack_size / sizeof(u64)]; \ 2299 u64 regs[MAX_BPF_EXT_REG] = {}; \ 2300 \ 2301 kmsan_unpoison_memory(stack, sizeof(stack)); \ 2302 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2303 ARG1 = (u64) (unsigned long) ctx; \ 2304 return ___bpf_prog_run(regs, insn); \ 2305 } 2306 2307 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size 2308 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \ 2309 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ 2310 const struct bpf_insn *insn) \ 2311 { \ 2312 u64 stack[stack_size / sizeof(u64)]; \ 2313 u64 regs[MAX_BPF_EXT_REG]; \ 2314 \ 2315 kmsan_unpoison_memory(stack, sizeof(stack)); \ 2316 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2317 BPF_R1 = r1; \ 2318 BPF_R2 = r2; \ 2319 BPF_R3 = r3; \ 2320 BPF_R4 = r4; \ 2321 BPF_R5 = r5; \ 2322 return ___bpf_prog_run(regs, insn); \ 2323 } 2324 2325 #define EVAL1(FN, X) FN(X) 2326 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y) 2327 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y) 2328 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y) 2329 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y) 2330 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y) 2331 2332 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192); 2333 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384); 2334 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512); 2335 2336 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192); 2337 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384); 2338 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512); 2339 2340 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size), 2341 2342 static unsigned int (*interpreters[])(const void *ctx, 2343 const struct bpf_insn *insn) = { 2344 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2345 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2346 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2347 }; 2348 #undef PROG_NAME_LIST 2349 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size), 2350 static __maybe_unused 2351 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, 2352 const struct bpf_insn *insn) = { 2353 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2354 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2355 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2356 }; 2357 #undef PROG_NAME_LIST 2358 2359 #ifdef CONFIG_BPF_SYSCALL 2360 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) 2361 { 2362 stack_depth = max_t(u32, stack_depth, 1); 2363 insn->off = (s16) insn->imm; 2364 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] - 2365 __bpf_call_base_args; 2366 insn->code = BPF_JMP | BPF_CALL_ARGS; 2367 } 2368 #endif 2369 #endif 2370 2371 static unsigned int __bpf_prog_ret0_warn(const void *ctx, 2372 const struct bpf_insn *insn) 2373 { 2374 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON 2375 * is not working properly, so warn about it! 
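*
* With CONFIG_BPF_JIT_ALWAYS_ON, bpf_prog_select_interpreter() points
* fp->bpf_func at this stub, and bpf_prog_select_runtime() then insists
* on a successful JIT (jit_needed), failing the load with -ENOTSUPP
* otherwise, so this path is never meant to execute.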
2376 */ 2377 WARN_ON_ONCE(1); 2378 return 0; 2379 } 2380 2381 static bool __bpf_prog_map_compatible(struct bpf_map *map, 2382 const struct bpf_prog *fp) 2383 { 2384 enum bpf_prog_type prog_type = resolve_prog_type(fp); 2385 struct bpf_prog_aux *aux = fp->aux; 2386 enum bpf_cgroup_storage_type i; 2387 bool ret = false; 2388 u64 cookie; 2389 2390 if (fp->kprobe_override) 2391 return ret; 2392 2393 spin_lock(&map->owner_lock); 2394 /* There's no owner yet where we could check for compatibility. */ 2395 if (!map->owner) { 2396 map->owner = bpf_map_owner_alloc(map); 2397 if (!map->owner) 2398 goto err; 2399 map->owner->type = prog_type; 2400 map->owner->jited = fp->jited; 2401 map->owner->xdp_has_frags = aux->xdp_has_frags; 2402 map->owner->expected_attach_type = fp->expected_attach_type; 2403 map->owner->attach_func_proto = aux->attach_func_proto; 2404 for_each_cgroup_storage_type(i) { 2405 map->owner->storage_cookie[i] = 2406 aux->cgroup_storage[i] ? 2407 aux->cgroup_storage[i]->cookie : 0; 2408 } 2409 ret = true; 2410 } else { 2411 ret = map->owner->type == prog_type && 2412 map->owner->jited == fp->jited && 2413 map->owner->xdp_has_frags == aux->xdp_has_frags; 2414 if (ret && 2415 map->map_type == BPF_MAP_TYPE_PROG_ARRAY && 2416 map->owner->expected_attach_type != fp->expected_attach_type) 2417 ret = false; 2418 for_each_cgroup_storage_type(i) { 2419 if (!ret) 2420 break; 2421 cookie = aux->cgroup_storage[i] ? 2422 aux->cgroup_storage[i]->cookie : 0; 2423 ret = map->owner->storage_cookie[i] == cookie || 2424 !cookie; 2425 } 2426 if (ret && 2427 map->owner->attach_func_proto != aux->attach_func_proto) { 2428 switch (prog_type) { 2429 case BPF_PROG_TYPE_TRACING: 2430 case BPF_PROG_TYPE_LSM: 2431 case BPF_PROG_TYPE_EXT: 2432 case BPF_PROG_TYPE_STRUCT_OPS: 2433 ret = false; 2434 break; 2435 default: 2436 break; 2437 } 2438 } 2439 } 2440 err: 2441 spin_unlock(&map->owner_lock); 2442 return ret; 2443 } 2444 2445 bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp) 2446 { 2447 /* XDP programs inserted into maps are not guaranteed to run on 2448 * a particular netdev (and can run outside driver context entirely 2449 * in the case of devmap and cpumap). Until device checks 2450 * are implemented, prohibit adding dev-bound programs to program maps. 2451 */ 2452 if (bpf_prog_is_dev_bound(fp->aux)) 2453 return false; 2454 2455 return __bpf_prog_map_compatible(map, fp); 2456 } 2457 2458 static int bpf_check_tail_call(const struct bpf_prog *fp) 2459 { 2460 struct bpf_prog_aux *aux = fp->aux; 2461 int i, ret = 0; 2462 2463 mutex_lock(&aux->used_maps_mutex); 2464 for (i = 0; i < aux->used_map_cnt; i++) { 2465 struct bpf_map *map = aux->used_maps[i]; 2466 2467 if (!map_type_contains_progs(map)) 2468 continue; 2469 2470 if (!__bpf_prog_map_compatible(map, fp)) { 2471 ret = -EINVAL; 2472 goto out; 2473 } 2474 } 2475 2476 out: 2477 mutex_unlock(&aux->used_maps_mutex); 2478 return ret; 2479 } 2480 2481 static bool bpf_prog_select_interpreter(struct bpf_prog *fp) 2482 { 2483 bool select_interpreter = false; 2484 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 2485 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 2486 u32 idx = (round_up(stack_depth, 32) / 32) - 1; 2487 2488 /* may_goto may cause stack size > 512, leading to idx out-of-bounds. 2489 * But for non-JITed programs, we don't need bpf_func, so no bounds 2490 * check needed. 
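*
* Worked example: stack_depth = 512 gives idx = (round_up(512, 32) / 32) - 1
* = 15, the last of the 16 interpreters[] entries (stack sizes 32..512 in
* steps of 32). A may_goto-inflated depth of, say, 544 would give idx = 16,
* which the bounds check below routes to __bpf_prog_ret0_warn instead of
* indexing out of range.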
2491 */ 2492 if (idx < ARRAY_SIZE(interpreters)) { 2493 fp->bpf_func = interpreters[idx]; 2494 select_interpreter = true; 2495 } else { 2496 fp->bpf_func = __bpf_prog_ret0_warn; 2497 } 2498 #else 2499 fp->bpf_func = __bpf_prog_ret0_warn; 2500 #endif 2501 return select_interpreter; 2502 } 2503 2504 /** 2505 * bpf_prog_select_runtime - select exec runtime for BPF program 2506 * @fp: bpf_prog populated with BPF program 2507 * @err: pointer to error variable 2508 * 2509 * Try to JIT eBPF program, if JIT is not available, use interpreter. 2510 * The BPF program will be executed via bpf_prog_run() function. 2511 * 2512 * Return: the &fp argument along with &err set to 0 for success or 2513 * a negative errno code on failure 2514 */ 2515 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 2516 { 2517 /* In case of BPF to BPF calls, verifier did all the prep 2518 * work with regards to JITing, etc. 2519 */ 2520 bool jit_needed = false; 2521 2522 if (fp->bpf_func) 2523 goto finalize; 2524 2525 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) || 2526 bpf_prog_has_kfunc_call(fp)) 2527 jit_needed = true; 2528 2529 if (!bpf_prog_select_interpreter(fp)) 2530 jit_needed = true; 2531 2532 /* eBPF JITs can rewrite the program in case constant 2533 * blinding is active. However, in case of error during 2534 * blinding, bpf_int_jit_compile() must always return a 2535 * valid program, which in this case would simply not 2536 * be JITed, but falls back to the interpreter. 2537 */ 2538 if (!bpf_prog_is_offloaded(fp->aux)) { 2539 *err = bpf_prog_alloc_jited_linfo(fp); 2540 if (*err) 2541 return fp; 2542 2543 fp = bpf_int_jit_compile(fp); 2544 bpf_prog_jit_attempt_done(fp); 2545 if (!fp->jited && jit_needed) { 2546 *err = -ENOTSUPP; 2547 return fp; 2548 } 2549 } else { 2550 *err = bpf_prog_offload_compile(fp); 2551 if (*err) 2552 return fp; 2553 } 2554 2555 finalize: 2556 *err = bpf_prog_lock_ro(fp); 2557 if (*err) 2558 return fp; 2559 2560 /* The tail call compatibility check can only be done at 2561 * this late stage as we need to determine, if we deal 2562 * with JITed or non JITed program concatenations and not 2563 * all eBPF JITs might immediately support all features. 2564 */ 2565 *err = bpf_check_tail_call(fp); 2566 2567 return fp; 2568 } 2569 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); 2570 2571 static unsigned int __bpf_prog_ret1(const void *ctx, 2572 const struct bpf_insn *insn) 2573 { 2574 return 1; 2575 } 2576 2577 static struct bpf_prog_dummy { 2578 struct bpf_prog prog; 2579 } dummy_bpf_prog = { 2580 .prog = { 2581 .bpf_func = __bpf_prog_ret1, 2582 }, 2583 }; 2584 2585 struct bpf_empty_prog_array bpf_empty_prog_array = { 2586 .null_prog = NULL, 2587 }; 2588 EXPORT_SYMBOL(bpf_empty_prog_array); 2589 2590 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) 2591 { 2592 struct bpf_prog_array *p; 2593 2594 if (prog_cnt) 2595 p = kzalloc(struct_size(p, items, prog_cnt + 1), flags); 2596 else 2597 p = &bpf_empty_prog_array.hdr; 2598 2599 return p; 2600 } 2601 2602 void bpf_prog_array_free(struct bpf_prog_array *progs) 2603 { 2604 if (!progs || progs == &bpf_empty_prog_array.hdr) 2605 return; 2606 kfree_rcu(progs, rcu); 2607 } 2608 2609 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu) 2610 { 2611 struct bpf_prog_array *progs; 2612 2613 /* If RCU Tasks Trace grace period implies RCU grace period, there is 2614 * no need to call kfree_rcu(), just call kfree() directly. 
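*
* By the time this callback runs, one RCU Tasks Trace grace period has
* already elapsed; if rcu_trace_implies_rcu_gp() is true that also covers
* a regular RCU grace period, so a plain kfree() is enough, otherwise a
* second, regular kfree_rcu() is chained below.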
2615 */ 2616 progs = container_of(rcu, struct bpf_prog_array, rcu); 2617 if (rcu_trace_implies_rcu_gp()) 2618 kfree(progs); 2619 else 2620 kfree_rcu(progs, rcu); 2621 } 2622 2623 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs) 2624 { 2625 if (!progs || progs == &bpf_empty_prog_array.hdr) 2626 return; 2627 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb); 2628 } 2629 2630 int bpf_prog_array_length(struct bpf_prog_array *array) 2631 { 2632 struct bpf_prog_array_item *item; 2633 u32 cnt = 0; 2634 2635 for (item = array->items; item->prog; item++) 2636 if (item->prog != &dummy_bpf_prog.prog) 2637 cnt++; 2638 return cnt; 2639 } 2640 2641 bool bpf_prog_array_is_empty(struct bpf_prog_array *array) 2642 { 2643 struct bpf_prog_array_item *item; 2644 2645 for (item = array->items; item->prog; item++) 2646 if (item->prog != &dummy_bpf_prog.prog) 2647 return false; 2648 return true; 2649 } 2650 2651 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, 2652 u32 *prog_ids, 2653 u32 request_cnt) 2654 { 2655 struct bpf_prog_array_item *item; 2656 int i = 0; 2657 2658 for (item = array->items; item->prog; item++) { 2659 if (item->prog == &dummy_bpf_prog.prog) 2660 continue; 2661 prog_ids[i] = item->prog->aux->id; 2662 if (++i == request_cnt) { 2663 item++; 2664 break; 2665 } 2666 } 2667 2668 return !!(item->prog); 2669 } 2670 2671 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, 2672 __u32 __user *prog_ids, u32 cnt) 2673 { 2674 unsigned long err = 0; 2675 bool nospc; 2676 u32 *ids; 2677 2678 /* users of this function are doing: 2679 * cnt = bpf_prog_array_length(); 2680 * if (cnt > 0) 2681 * bpf_prog_array_copy_to_user(..., cnt); 2682 * so below kcalloc doesn't need extra cnt > 0 check. 2683 */ 2684 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); 2685 if (!ids) 2686 return -ENOMEM; 2687 nospc = bpf_prog_array_copy_core(array, ids, cnt); 2688 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); 2689 kfree(ids); 2690 if (err) 2691 return -EFAULT; 2692 if (nospc) 2693 return -ENOSPC; 2694 return 0; 2695 } 2696 2697 void bpf_prog_array_delete_safe(struct bpf_prog_array *array, 2698 struct bpf_prog *old_prog) 2699 { 2700 struct bpf_prog_array_item *item; 2701 2702 for (item = array->items; item->prog; item++) 2703 if (item->prog == old_prog) { 2704 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); 2705 break; 2706 } 2707 } 2708 2709 /** 2710 * bpf_prog_array_delete_safe_at() - Replaces the program at the given 2711 * index into the program array with 2712 * a dummy no-op program. 2713 * @array: a bpf_prog_array 2714 * @index: the index of the program to replace 2715 * 2716 * Skips over dummy programs, by not counting them, when calculating 2717 * the position of the program to replace. 2718 * 2719 * Return: 2720 * * 0 - Success 2721 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2722 * * -ENOENT - Index out of range 2723 */ 2724 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index) 2725 { 2726 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog); 2727 } 2728 2729 /** 2730 * bpf_prog_array_update_at() - Updates the program at the given index 2731 * into the program array. 2732 * @array: a bpf_prog_array 2733 * @index: the index of the program to update 2734 * @prog: the program to insert into the array 2735 * 2736 * Skips over dummy programs, by not counting them, when calculating 2737 * the position of the program to update. 
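*
* For example, with items = { dummy, A, B, NULL }, index 0 updates the
* slot holding A and index 1 the slot holding B; index 2 returns -ENOENT.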
2738 * 2739 * Return: 2740 * * 0 - Success 2741 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2742 * * -ENOENT - Index out of range 2743 */ 2744 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, 2745 struct bpf_prog *prog) 2746 { 2747 struct bpf_prog_array_item *item; 2748 2749 if (unlikely(index < 0)) 2750 return -EINVAL; 2751 2752 for (item = array->items; item->prog; item++) { 2753 if (item->prog == &dummy_bpf_prog.prog) 2754 continue; 2755 if (!index) { 2756 WRITE_ONCE(item->prog, prog); 2757 return 0; 2758 } 2759 index--; 2760 } 2761 return -ENOENT; 2762 } 2763 2764 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 2765 struct bpf_prog *exclude_prog, 2766 struct bpf_prog *include_prog, 2767 u64 bpf_cookie, 2768 struct bpf_prog_array **new_array) 2769 { 2770 int new_prog_cnt, carry_prog_cnt = 0; 2771 struct bpf_prog_array_item *existing, *new; 2772 struct bpf_prog_array *array; 2773 bool found_exclude = false; 2774 2775 /* Figure out how many existing progs we need to carry over to 2776 * the new array. 2777 */ 2778 if (old_array) { 2779 existing = old_array->items; 2780 for (; existing->prog; existing++) { 2781 if (existing->prog == exclude_prog) { 2782 found_exclude = true; 2783 continue; 2784 } 2785 if (existing->prog != &dummy_bpf_prog.prog) 2786 carry_prog_cnt++; 2787 if (existing->prog == include_prog) 2788 return -EEXIST; 2789 } 2790 } 2791 2792 if (exclude_prog && !found_exclude) 2793 return -ENOENT; 2794 2795 /* How many progs (not NULL) will be in the new array? */ 2796 new_prog_cnt = carry_prog_cnt; 2797 if (include_prog) 2798 new_prog_cnt += 1; 2799 2800 /* Do we have any prog (not NULL) in the new array? */ 2801 if (!new_prog_cnt) { 2802 *new_array = NULL; 2803 return 0; 2804 } 2805 2806 /* +1 as the end of prog_array is marked with NULL */ 2807 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL); 2808 if (!array) 2809 return -ENOMEM; 2810 new = array->items; 2811 2812 /* Fill in the new prog array */ 2813 if (carry_prog_cnt) { 2814 existing = old_array->items; 2815 for (; existing->prog; existing++) { 2816 if (existing->prog == exclude_prog || 2817 existing->prog == &dummy_bpf_prog.prog) 2818 continue; 2819 2820 new->prog = existing->prog; 2821 new->bpf_cookie = existing->bpf_cookie; 2822 new++; 2823 } 2824 } 2825 if (include_prog) { 2826 new->prog = include_prog; 2827 new->bpf_cookie = bpf_cookie; 2828 new++; 2829 } 2830 new->prog = NULL; 2831 *new_array = array; 2832 return 0; 2833 } 2834 2835 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 2836 u32 *prog_ids, u32 request_cnt, 2837 u32 *prog_cnt) 2838 { 2839 u32 cnt = 0; 2840 2841 if (array) 2842 cnt = bpf_prog_array_length(array); 2843 2844 *prog_cnt = cnt; 2845 2846 /* return early if user requested only program count or nothing to copy */ 2847 if (!request_cnt || !cnt) 2848 return 0; 2849 2850 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */ 2851 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? 
-ENOSPC 2852 : 0; 2853 } 2854 2855 void __bpf_free_used_maps(struct bpf_prog_aux *aux, 2856 struct bpf_map **used_maps, u32 len) 2857 { 2858 struct bpf_map *map; 2859 bool sleepable; 2860 u32 i; 2861 2862 sleepable = aux->prog->sleepable; 2863 for (i = 0; i < len; i++) { 2864 map = used_maps[i]; 2865 if (map->ops->map_poke_untrack) 2866 map->ops->map_poke_untrack(map, aux); 2867 if (sleepable) 2868 atomic64_dec(&map->sleepable_refcnt); 2869 bpf_map_put(map); 2870 } 2871 } 2872 2873 static void bpf_free_used_maps(struct bpf_prog_aux *aux) 2874 { 2875 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt); 2876 kfree(aux->used_maps); 2877 } 2878 2879 void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len) 2880 { 2881 #ifdef CONFIG_BPF_SYSCALL 2882 struct btf_mod_pair *btf_mod; 2883 u32 i; 2884 2885 for (i = 0; i < len; i++) { 2886 btf_mod = &used_btfs[i]; 2887 if (btf_mod->module) 2888 module_put(btf_mod->module); 2889 btf_put(btf_mod->btf); 2890 } 2891 #endif 2892 } 2893 2894 static void bpf_free_used_btfs(struct bpf_prog_aux *aux) 2895 { 2896 __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt); 2897 kfree(aux->used_btfs); 2898 } 2899 2900 static void bpf_prog_free_deferred(struct work_struct *work) 2901 { 2902 struct bpf_prog_aux *aux; 2903 int i; 2904 2905 aux = container_of(work, struct bpf_prog_aux, work); 2906 #ifdef CONFIG_BPF_SYSCALL 2907 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab); 2908 bpf_prog_stream_free(aux->prog); 2909 #endif 2910 #ifdef CONFIG_CGROUP_BPF 2911 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID) 2912 bpf_cgroup_atype_put(aux->cgroup_atype); 2913 #endif 2914 bpf_free_used_maps(aux); 2915 bpf_free_used_btfs(aux); 2916 if (bpf_prog_is_dev_bound(aux)) 2917 bpf_prog_dev_bound_destroy(aux->prog); 2918 #ifdef CONFIG_PERF_EVENTS 2919 if (aux->prog->has_callchain_buf) 2920 put_callchain_buffers(); 2921 #endif 2922 if (aux->dst_trampoline) 2923 bpf_trampoline_put(aux->dst_trampoline); 2924 for (i = 0; i < aux->real_func_cnt; i++) { 2925 /* We can just unlink the subprog poke descriptor table as 2926 * it was originally linked to the main program and is also 2927 * released along with it. 2928 */ 2929 aux->func[i]->aux->poke_tab = NULL; 2930 bpf_jit_free(aux->func[i]); 2931 } 2932 if (aux->real_func_cnt) { 2933 kfree(aux->func); 2934 bpf_prog_unlock_free(aux->prog); 2935 } else { 2936 bpf_jit_free(aux->prog); 2937 } 2938 } 2939 2940 void bpf_prog_free(struct bpf_prog *fp) 2941 { 2942 struct bpf_prog_aux *aux = fp->aux; 2943 2944 if (aux->dst_prog) 2945 bpf_prog_put(aux->dst_prog); 2946 bpf_token_put(aux->token); 2947 INIT_WORK(&aux->work, bpf_prog_free_deferred); 2948 schedule_work(&aux->work); 2949 } 2950 EXPORT_SYMBOL_GPL(bpf_prog_free); 2951 2952 /* RNG for unprivileged user space with separated state from prandom_u32(). */ 2953 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state); 2954 2955 void bpf_user_rnd_init_once(void) 2956 { 2957 prandom_init_once(&bpf_user_rnd_state); 2958 } 2959 2960 BPF_CALL_0(bpf_user_rnd_u32) 2961 { 2962 /* Should someone ever have the rather unwise idea to use some 2963 * of the registers passed into this function, then note that 2964 * this function is called from native eBPF and classic-to-eBPF 2965 * transformations. Register assignments from both sides are 2966 * different, f.e. classic always sets fn(ctx, A, X) here. 
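*
* The per-CPU state below is accessed via get_cpu_var()/put_cpu_var(),
* which pins the caller to one CPU (preemption disabled) for the
* duration of the prandom_u32_state() update.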
2967 */ 2968 struct rnd_state *state; 2969 u32 res; 2970 2971 state = &get_cpu_var(bpf_user_rnd_state); 2972 res = prandom_u32_state(state); 2973 put_cpu_var(bpf_user_rnd_state); 2974 2975 return res; 2976 } 2977 2978 BPF_CALL_0(bpf_get_raw_cpu_id) 2979 { 2980 return raw_smp_processor_id(); 2981 } 2982 2983 /* Weak definitions of helper functions in case we don't have bpf syscall. */ 2984 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; 2985 const struct bpf_func_proto bpf_map_update_elem_proto __weak; 2986 const struct bpf_func_proto bpf_map_delete_elem_proto __weak; 2987 const struct bpf_func_proto bpf_map_push_elem_proto __weak; 2988 const struct bpf_func_proto bpf_map_pop_elem_proto __weak; 2989 const struct bpf_func_proto bpf_map_peek_elem_proto __weak; 2990 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak; 2991 const struct bpf_func_proto bpf_spin_lock_proto __weak; 2992 const struct bpf_func_proto bpf_spin_unlock_proto __weak; 2993 const struct bpf_func_proto bpf_jiffies64_proto __weak; 2994 2995 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; 2996 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; 2997 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak; 2998 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; 2999 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak; 3000 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak; 3001 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak; 3002 3003 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak; 3004 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak; 3005 const struct bpf_func_proto bpf_get_current_comm_proto __weak; 3006 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak; 3007 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak; 3008 const struct bpf_func_proto bpf_get_local_storage_proto __weak; 3009 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak; 3010 const struct bpf_func_proto bpf_snprintf_btf_proto __weak; 3011 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak; 3012 const struct bpf_func_proto bpf_set_retval_proto __weak; 3013 const struct bpf_func_proto bpf_get_retval_proto __weak; 3014 3015 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) 3016 { 3017 return NULL; 3018 } 3019 3020 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void) 3021 { 3022 return NULL; 3023 } 3024 3025 const struct bpf_func_proto * __weak bpf_get_perf_event_read_value_proto(void) 3026 { 3027 return NULL; 3028 } 3029 3030 u64 __weak 3031 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 3032 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 3033 { 3034 return -ENOTSUPP; 3035 } 3036 EXPORT_SYMBOL_GPL(bpf_event_output); 3037 3038 /* Always built-in helper functions. */ 3039 const struct bpf_func_proto bpf_tail_call_proto = { 3040 /* func is unused for tail_call, we set it to pass the 3041 * get_helper_proto check 3042 */ 3043 .func = BPF_PTR_POISON, 3044 .gpl_only = false, 3045 .ret_type = RET_VOID, 3046 .arg1_type = ARG_PTR_TO_CTX, 3047 .arg2_type = ARG_CONST_MAP_PTR, 3048 .arg3_type = ARG_ANYTHING, 3049 }; 3050 3051 /* Stub for JITs that only support cBPF. eBPF programs are interpreted. 3052 * It is encouraged to implement bpf_int_jit_compile() instead, so that 3053 * eBPF and implicitly also cBPF can get JITed! 
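*
* A rough sketch of what an arch override is expected to do (details vary
* per backend; image is a placeholder for the emitted native code):
*
*	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
*	{
*		... emit native code into image ...
*		prog->bpf_func = (void *)image;
*		prog->jited = 1;
*		return prog;
*	}
*
* and, as noted at bpf_prog_select_runtime(), it must return a valid
* (possibly still non-JITed) program even when JITing fails.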
3054 */ 3055 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog) 3056 { 3057 return prog; 3058 } 3059 3060 /* Stub for JITs that support eBPF. All cBPF code gets transformed into 3061 * eBPF by the kernel and is later compiled by bpf_int_jit_compile(). 3062 */ 3063 void __weak bpf_jit_compile(struct bpf_prog *prog) 3064 { 3065 } 3066 3067 bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id) 3068 { 3069 return false; 3070 } 3071 3072 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage 3073 * analysis code and wants explicit zero extension inserted by verifier. 3074 * Otherwise, return FALSE. 3075 * 3076 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if 3077 * you don't override this. JITs that don't want these extra insns can detect 3078 * them using insn_is_zext. 3079 */ 3080 bool __weak bpf_jit_needs_zext(void) 3081 { 3082 return false; 3083 } 3084 3085 /* By default, enable the verifier's mitigations against Spectre v1 and v4 for 3086 * all archs. The value returned must not change at runtime as there is 3087 * currently no support for reloading programs that were loaded without 3088 * mitigations. 3089 */ 3090 bool __weak bpf_jit_bypass_spec_v1(void) 3091 { 3092 return false; 3093 } 3094 3095 bool __weak bpf_jit_bypass_spec_v4(void) 3096 { 3097 return false; 3098 } 3099 3100 /* Return true if the JIT inlines the call to the helper corresponding to 3101 * the imm. 3102 * 3103 * The verifier will not patch the insn->imm for the call to the helper if 3104 * this returns true. 3105 */ 3106 bool __weak bpf_jit_inlines_helper_call(s32 imm) 3107 { 3108 return false; 3109 } 3110 3111 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */ 3112 bool __weak bpf_jit_supports_subprog_tailcalls(void) 3113 { 3114 return false; 3115 } 3116 3117 bool __weak bpf_jit_supports_percpu_insn(void) 3118 { 3119 return false; 3120 } 3121 3122 bool __weak bpf_jit_supports_kfunc_call(void) 3123 { 3124 return false; 3125 } 3126 3127 bool __weak bpf_jit_supports_far_kfunc_call(void) 3128 { 3129 return false; 3130 } 3131 3132 bool __weak bpf_jit_supports_arena(void) 3133 { 3134 return false; 3135 } 3136 3137 bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) 3138 { 3139 return false; 3140 } 3141 3142 u64 __weak bpf_arch_uaddress_limit(void) 3143 { 3144 #if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) 3145 return TASK_SIZE; 3146 #else 3147 return 0; 3148 #endif 3149 } 3150 3151 /* Return TRUE if the JIT backend satisfies the following two conditions: 3152 * 1) JIT backend supports atomic_xchg() on pointer-sized words. 3153 * 2) Under the specific arch, the implementation of xchg() is the same 3154 * as atomic_xchg() on pointer-sized words. 3155 */ 3156 bool __weak bpf_jit_supports_ptr_xchg(void) 3157 { 3158 return false; 3159 } 3160 3161 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call 3162 * skb_copy_bits(), so provide a weak definition of it for NET-less config. 
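*
* The weak stub below simply fails with -EFAULT; with CONFIG_NET the
* strong definition in net/core/skbuff.c takes precedence and performs
* the actual copy out of the skb.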
3163 */ 3164 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, 3165 int len) 3166 { 3167 return -EFAULT; 3168 } 3169 3170 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t, 3171 enum bpf_text_poke_type new_t, void *old_addr, 3172 void *new_addr) 3173 { 3174 return -ENOTSUPP; 3175 } 3176 3177 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len) 3178 { 3179 return ERR_PTR(-ENOTSUPP); 3180 } 3181 3182 int __weak bpf_arch_text_invalidate(void *dst, size_t len) 3183 { 3184 return -ENOTSUPP; 3185 } 3186 3187 bool __weak bpf_jit_supports_exceptions(void) 3188 { 3189 return false; 3190 } 3191 3192 bool __weak bpf_jit_supports_private_stack(void) 3193 { 3194 return false; 3195 } 3196 3197 void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) 3198 { 3199 } 3200 3201 bool __weak bpf_jit_supports_timed_may_goto(void) 3202 { 3203 return false; 3204 } 3205 3206 u64 __weak arch_bpf_timed_may_goto(void) 3207 { 3208 return 0; 3209 } 3210 3211 static noinline void bpf_prog_report_may_goto_violation(void) 3212 { 3213 #ifdef CONFIG_BPF_SYSCALL 3214 struct bpf_stream_stage ss; 3215 struct bpf_prog *prog; 3216 3217 prog = bpf_prog_find_from_stack(); 3218 if (!prog) 3219 return; 3220 bpf_stream_stage(ss, prog, BPF_STDERR, ({ 3221 bpf_stream_printk(ss, "ERROR: Timeout detected for may_goto instruction\n"); 3222 bpf_stream_dump_stack(ss); 3223 })); 3224 #endif 3225 } 3226 3227 u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p) 3228 { 3229 u64 time = ktime_get_mono_fast_ns(); 3230 3231 /* Populate the timestamp for this stack frame, and refresh count. */ 3232 if (!p->timestamp) { 3233 p->timestamp = time; 3234 return BPF_MAX_TIMED_LOOPS; 3235 } 3236 /* Check if we've exhausted our time slice, and zero count. */ 3237 if (unlikely(time - p->timestamp >= (NSEC_PER_SEC / 4))) { 3238 bpf_prog_report_may_goto_violation(); 3239 return 0; 3240 } 3241 /* Refresh the count for the stack frame. */ 3242 return BPF_MAX_TIMED_LOOPS; 3243 } 3244 3245 /* for configs without MMU or 32-bit */ 3246 __weak const struct bpf_map_ops arena_map_ops; 3247 __weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena) 3248 { 3249 return 0; 3250 } 3251 __weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena) 3252 { 3253 return 0; 3254 } 3255 3256 #ifdef CONFIG_BPF_SYSCALL 3257 static int __init bpf_global_ma_init(void) 3258 { 3259 int ret; 3260 3261 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false); 3262 bpf_global_ma_set = !ret; 3263 return ret; 3264 } 3265 late_initcall(bpf_global_ma_init); 3266 #endif 3267 3268 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key); 3269 EXPORT_SYMBOL(bpf_stats_enabled_key); 3270 3271 /* All definitions of tracepoints related to BPF. */ 3272 #define CREATE_TRACE_POINTS 3273 #include <linux/bpf_trace.h> 3274 3275 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception); 3276 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx); 3277 3278 #ifdef CONFIG_BPF_SYSCALL 3279 3280 int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep, 3281 const char **linep, int *nump) 3282 { 3283 int idx = -1, insn_start, insn_end, len; 3284 struct bpf_line_info *linfo; 3285 void **jited_linfo; 3286 struct btf *btf; 3287 int nr_linfo; 3288 3289 btf = prog->aux->btf; 3290 linfo = prog->aux->linfo; 3291 jited_linfo = prog->aux->jited_linfo; 3292 3293 if (!btf || !linfo || !jited_linfo) 3294 return -EINVAL; 3295 len = prog->aux->func ? 
prog->aux->func[prog->aux->func_idx]->len : prog->len; 3296 3297 linfo = &prog->aux->linfo[prog->aux->linfo_idx]; 3298 jited_linfo = &prog->aux->jited_linfo[prog->aux->linfo_idx]; 3299 3300 insn_start = linfo[0].insn_off; 3301 insn_end = insn_start + len; 3302 nr_linfo = prog->aux->nr_linfo - prog->aux->linfo_idx; 3303 3304 for (int i = 0; i < nr_linfo && 3305 linfo[i].insn_off >= insn_start && linfo[i].insn_off < insn_end; i++) { 3306 if (jited_linfo[i] >= (void *)ip) 3307 break; 3308 idx = i; 3309 } 3310 3311 if (idx == -1) 3312 return -ENOENT; 3313 3314 /* Get base component of the file path. */ 3315 *filep = btf_name_by_offset(btf, linfo[idx].file_name_off); 3316 *filep = kbasename(*filep); 3317 /* Obtain the source line, and strip whitespace in prefix. */ 3318 *linep = btf_name_by_offset(btf, linfo[idx].line_off); 3319 while (isspace(**linep)) 3320 *linep += 1; 3321 *nump = BPF_LINE_INFO_LINE_NUM(linfo[idx].line_col); 3322 return 0; 3323 } 3324 3325 struct walk_stack_ctx { 3326 struct bpf_prog *prog; 3327 }; 3328 3329 static bool find_from_stack_cb(void *cookie, u64 ip, u64 sp, u64 bp) 3330 { 3331 struct walk_stack_ctx *ctxp = cookie; 3332 struct bpf_prog *prog; 3333 3334 /* 3335 * The RCU read lock is held to safely traverse the latch tree, but we 3336 * don't need its protection when accessing the prog, since it has an 3337 * active stack frame on the current stack trace, and won't disappear. 3338 */ 3339 rcu_read_lock(); 3340 prog = bpf_prog_ksym_find(ip); 3341 rcu_read_unlock(); 3342 if (!prog) 3343 return true; 3344 /* Make sure we return the main prog if we found a subprog */ 3345 ctxp->prog = prog->aux->main_prog_aux->prog; 3346 return false; 3347 } 3348 3349 struct bpf_prog *bpf_prog_find_from_stack(void) 3350 { 3351 struct walk_stack_ctx ctx = {}; 3352 3353 arch_bpf_stack_walk(find_from_stack_cb, &ctx); 3354 return ctx.prog; 3355 } 3356 3357 #endif 3358