1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux Socket Filter - Kernel level socket filtering 4 * 5 * Based on the design of the Berkeley Packet Filter. The new 6 * internal format has been designed by PLUMgrid: 7 * 8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com 9 * 10 * Authors: 11 * 12 * Jay Schulist <jschlst@samba.org> 13 * Alexei Starovoitov <ast@plumgrid.com> 14 * Daniel Borkmann <dborkman@redhat.com> 15 * 16 * Andi Kleen - Fix a few bad bugs and races. 17 * Kris Katterjohn - Added many additional checks in bpf_check_classic() 18 */ 19 20 #include <uapi/linux/btf.h> 21 #include <linux/filter.h> 22 #include <linux/skbuff.h> 23 #include <linux/vmalloc.h> 24 #include <linux/prandom.h> 25 #include <linux/bpf.h> 26 #include <linux/btf.h> 27 #include <linux/hex.h> 28 #include <linux/objtool.h> 29 #include <linux/overflow.h> 30 #include <linux/rbtree_latch.h> 31 #include <linux/kallsyms.h> 32 #include <linux/rcupdate.h> 33 #include <linux/perf_event.h> 34 #include <linux/extable.h> 35 #include <linux/log2.h> 36 #include <linux/bpf_verifier.h> 37 #include <linux/nodemask.h> 38 #include <linux/nospec.h> 39 #include <linux/bpf_mem_alloc.h> 40 #include <linux/memcontrol.h> 41 #include <linux/execmem.h> 42 #include <crypto/sha2.h> 43 44 #include <asm/barrier.h> 45 #include <linux/unaligned.h> 46 47 /* Registers */ 48 #define BPF_R0 regs[BPF_REG_0] 49 #define BPF_R1 regs[BPF_REG_1] 50 #define BPF_R2 regs[BPF_REG_2] 51 #define BPF_R3 regs[BPF_REG_3] 52 #define BPF_R4 regs[BPF_REG_4] 53 #define BPF_R5 regs[BPF_REG_5] 54 #define BPF_R6 regs[BPF_REG_6] 55 #define BPF_R7 regs[BPF_REG_7] 56 #define BPF_R8 regs[BPF_REG_8] 57 #define BPF_R9 regs[BPF_REG_9] 58 #define BPF_R10 regs[BPF_REG_10] 59 60 /* Named registers */ 61 #define DST regs[insn->dst_reg] 62 #define SRC regs[insn->src_reg] 63 #define FP regs[BPF_REG_FP] 64 #define AX regs[BPF_REG_AX] 65 #define ARG1 regs[BPF_REG_ARG1] 66 #define CTX regs[BPF_REG_CTX] 67 #define OFF insn->off 68 #define IMM insn->imm 69 70 struct bpf_mem_alloc bpf_global_ma; 71 bool bpf_global_ma_set; 72 73 /* No hurry in this branch 74 * 75 * Exported for the bpf jit load helper. 
76 */ 77 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size) 78 { 79 u8 *ptr = NULL; 80 81 if (k >= SKF_NET_OFF) { 82 ptr = skb_network_header(skb) + k - SKF_NET_OFF; 83 } else if (k >= SKF_LL_OFF) { 84 if (unlikely(!skb_mac_header_was_set(skb))) 85 return NULL; 86 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 87 } 88 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) 89 return ptr; 90 91 return NULL; 92 } 93 94 /* tell bpf programs that include vmlinux.h kernel's PAGE_SIZE */ 95 enum page_size_enum { 96 __PAGE_SIZE = PAGE_SIZE 97 }; 98 99 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags) 100 { 101 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); 102 struct bpf_prog_aux *aux; 103 struct bpf_prog *fp; 104 105 size = round_up(size, __PAGE_SIZE); 106 fp = __vmalloc(size, gfp_flags); 107 if (fp == NULL) 108 return NULL; 109 110 aux = kzalloc_obj(*aux, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); 111 if (aux == NULL) { 112 vfree(fp); 113 return NULL; 114 } 115 fp->active = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 4, 116 bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); 117 if (!fp->active) { 118 vfree(fp); 119 kfree(aux); 120 return NULL; 121 } 122 123 fp->pages = size / PAGE_SIZE; 124 fp->aux = aux; 125 fp->aux->main_prog_aux = aux; 126 fp->aux->prog = fp; 127 fp->jit_requested = ebpf_jit_enabled(); 128 fp->blinding_requested = bpf_jit_blinding_enabled(fp); 129 #ifdef CONFIG_CGROUP_BPF 130 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID; 131 #endif 132 133 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); 134 #ifdef CONFIG_FINEIBT 135 INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode); 136 #endif 137 mutex_init(&fp->aux->used_maps_mutex); 138 mutex_init(&fp->aux->ext_mutex); 139 mutex_init(&fp->aux->dst_mutex); 140 mutex_init(&fp->aux->st_ops_assoc_mutex); 141 142 #ifdef CONFIG_BPF_SYSCALL 143 bpf_prog_stream_init(fp); 144 #endif 145 146 return fp; 147 } 148 149 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) 150 { 151 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); 152 struct bpf_prog *prog; 153 int cpu; 154 155 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags); 156 if (!prog) 157 return NULL; 158 159 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags); 160 if (!prog->stats) { 161 free_percpu(prog->active); 162 kfree(prog->aux); 163 vfree(prog); 164 return NULL; 165 } 166 167 for_each_possible_cpu(cpu) { 168 struct bpf_prog_stats *pstats; 169 170 pstats = per_cpu_ptr(prog->stats, cpu); 171 u64_stats_init(&pstats->syncp); 172 } 173 return prog; 174 } 175 EXPORT_SYMBOL_GPL(bpf_prog_alloc); 176 177 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog) 178 { 179 if (!prog->aux->nr_linfo || !prog->jit_requested) 180 return 0; 181 182 prog->aux->jited_linfo = kvzalloc_objs(*prog->aux->jited_linfo, 183 prog->aux->nr_linfo, 184 bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN)); 185 if (!prog->aux->jited_linfo) 186 return -ENOMEM; 187 188 return 0; 189 } 190 191 void bpf_prog_jit_attempt_done(struct bpf_prog *prog) 192 { 193 if (prog->aux->jited_linfo && 194 (!prog->jited || !prog->aux->jited_linfo[0])) { 195 kvfree(prog->aux->jited_linfo); 196 prog->aux->jited_linfo = NULL; 197 } 198 199 kfree(prog->aux->kfunc_tab); 200 prog->aux->kfunc_tab = NULL; 201 } 202 203 /* The jit engine is responsible to provide an array 204 * for insn_off to the jited_off mapping (insn_to_jit_off). 
205 * 206 * The idx to this array is the insn_off. Hence, the insn_off 207 * here is relative to the prog itself instead of the main prog. 208 * This array has one entry for each xlated bpf insn. 209 * 210 * jited_off is the byte off to the end of the jited insn. 211 * 212 * Hence, with 213 * insn_start: 214 * The first bpf insn off of the prog. The insn off 215 * here is relative to the main prog. 216 * e.g. if prog is a subprog, insn_start > 0 217 * linfo_idx: 218 * The prog's idx to prog->aux->linfo and jited_linfo 219 * 220 * jited_linfo[linfo_idx] = prog->bpf_func 221 * 222 * For i > linfo_idx, 223 * 224 * jited_linfo[i] = prog->bpf_func + 225 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1] 226 */ 227 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, 228 const u32 *insn_to_jit_off) 229 { 230 u32 linfo_idx, insn_start, insn_end, nr_linfo, i; 231 const struct bpf_line_info *linfo; 232 void **jited_linfo; 233 234 if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt) 235 /* Userspace did not provide linfo */ 236 return; 237 238 linfo_idx = prog->aux->linfo_idx; 239 linfo = &prog->aux->linfo[linfo_idx]; 240 insn_start = linfo[0].insn_off; 241 insn_end = insn_start + prog->len; 242 243 jited_linfo = &prog->aux->jited_linfo[linfo_idx]; 244 jited_linfo[0] = prog->bpf_func; 245 246 nr_linfo = prog->aux->nr_linfo - linfo_idx; 247 248 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++) 249 /* The verifier ensures that linfo[i].insn_off is 250 * strictly increasing 251 */ 252 jited_linfo[i] = prog->bpf_func + 253 insn_to_jit_off[linfo[i].insn_off - insn_start - 1]; 254 } 255 256 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, 257 gfp_t gfp_extra_flags) 258 { 259 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); 260 struct bpf_prog *fp; 261 u32 pages; 262 263 size = round_up(size, PAGE_SIZE); 264 pages = size / PAGE_SIZE; 265 if (pages <= fp_old->pages) 266 return fp_old; 267 268 fp = __vmalloc(size, gfp_flags); 269 if (fp) { 270 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); 271 fp->pages = pages; 272 fp->aux->prog = fp; 273 274 /* We keep fp->aux from fp_old around in the new 275 * reallocated structure. 276 */ 277 fp_old->aux = NULL; 278 fp_old->stats = NULL; 279 fp_old->active = NULL; 280 __bpf_prog_free(fp_old); 281 } 282 283 return fp; 284 } 285 286 void __bpf_prog_free(struct bpf_prog *fp) 287 { 288 if (fp->aux) { 289 mutex_destroy(&fp->aux->used_maps_mutex); 290 mutex_destroy(&fp->aux->dst_mutex); 291 mutex_destroy(&fp->aux->st_ops_assoc_mutex); 292 kfree(fp->aux->poke_tab); 293 kfree(fp->aux); 294 } 295 free_percpu(fp->stats); 296 free_percpu(fp->active); 297 vfree(fp); 298 } 299 300 int bpf_prog_calc_tag(struct bpf_prog *fp) 301 { 302 size_t size = bpf_prog_insn_size(fp); 303 struct bpf_insn *dst; 304 bool was_ld_map; 305 u32 i; 306 307 dst = vmalloc(size); 308 if (!dst) 309 return -ENOMEM; 310 311 /* We need to take out the map fd for the digest calculation 312 * since they are unstable from user space side. 
313 */ 314 for (i = 0, was_ld_map = false; i < fp->len; i++) { 315 dst[i] = fp->insnsi[i]; 316 if (!was_ld_map && 317 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && 318 (dst[i].src_reg == BPF_PSEUDO_MAP_FD || 319 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) { 320 was_ld_map = true; 321 dst[i].imm = 0; 322 } else if (was_ld_map && 323 dst[i].code == 0 && 324 dst[i].dst_reg == 0 && 325 dst[i].src_reg == 0 && 326 dst[i].off == 0) { 327 was_ld_map = false; 328 dst[i].imm = 0; 329 } else { 330 was_ld_map = false; 331 } 332 } 333 sha256((u8 *)dst, size, fp->digest); 334 vfree(dst); 335 return 0; 336 } 337 338 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, 339 s32 end_new, s32 curr, const bool probe_pass) 340 { 341 const s64 imm_min = S32_MIN, imm_max = S32_MAX; 342 s32 delta = end_new - end_old; 343 s64 imm = insn->imm; 344 345 if (curr < pos && curr + imm + 1 >= end_old) 346 imm += delta; 347 else if (curr >= end_new && curr + imm + 1 < end_new) 348 imm -= delta; 349 if (imm < imm_min || imm > imm_max) 350 return -ERANGE; 351 if (!probe_pass) 352 insn->imm = imm; 353 return 0; 354 } 355 356 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, 357 s32 end_new, s32 curr, const bool probe_pass) 358 { 359 s64 off_min, off_max, off; 360 s32 delta = end_new - end_old; 361 362 if (insn->code == (BPF_JMP32 | BPF_JA)) { 363 off = insn->imm; 364 off_min = S32_MIN; 365 off_max = S32_MAX; 366 } else { 367 off = insn->off; 368 off_min = S16_MIN; 369 off_max = S16_MAX; 370 } 371 372 if (curr < pos && curr + off + 1 >= end_old) 373 off += delta; 374 else if (curr >= end_new && curr + off + 1 < end_new) 375 off -= delta; 376 if (off < off_min || off > off_max) 377 return -ERANGE; 378 if (!probe_pass) { 379 if (insn->code == (BPF_JMP32 | BPF_JA)) 380 insn->imm = off; 381 else 382 insn->off = off; 383 } 384 return 0; 385 } 386 387 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, 388 s32 end_new, const bool probe_pass) 389 { 390 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0); 391 struct bpf_insn *insn = prog->insnsi; 392 int ret = 0; 393 394 for (i = 0; i < insn_cnt; i++, insn++) { 395 u8 code; 396 397 /* In the probing pass we still operate on the original, 398 * unpatched image in order to check overflows before we 399 * do any other adjustments. Therefore skip the patchlet. 400 */ 401 if (probe_pass && i == pos) { 402 i = end_new; 403 insn = prog->insnsi + end_old; 404 } 405 if (bpf_pseudo_func(insn)) { 406 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 407 end_new, i, probe_pass); 408 if (ret) 409 return ret; 410 continue; 411 } 412 code = insn->code; 413 if ((BPF_CLASS(code) != BPF_JMP && 414 BPF_CLASS(code) != BPF_JMP32) || 415 BPF_OP(code) == BPF_EXIT) 416 continue; 417 /* Adjust offset of jmps if we cross patch boundaries. 
*/ 418 if (BPF_OP(code) == BPF_CALL) { 419 if (insn->src_reg != BPF_PSEUDO_CALL) 420 continue; 421 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 422 end_new, i, probe_pass); 423 } else { 424 ret = bpf_adj_delta_to_off(insn, pos, end_old, 425 end_new, i, probe_pass); 426 } 427 if (ret) 428 break; 429 } 430 431 return ret; 432 } 433 434 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta) 435 { 436 struct bpf_line_info *linfo; 437 u32 i, nr_linfo; 438 439 nr_linfo = prog->aux->nr_linfo; 440 if (!nr_linfo || !delta) 441 return; 442 443 linfo = prog->aux->linfo; 444 445 for (i = 0; i < nr_linfo; i++) 446 if (off < linfo[i].insn_off) 447 break; 448 449 /* Push all off < linfo[i].insn_off by delta */ 450 for (; i < nr_linfo; i++) 451 linfo[i].insn_off += delta; 452 } 453 454 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 455 const struct bpf_insn *patch, u32 len) 456 { 457 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; 458 const u32 cnt_max = S16_MAX; 459 struct bpf_prog *prog_adj; 460 int err; 461 462 /* Since our patchlet doesn't expand the image, we're done. */ 463 if (insn_delta == 0) { 464 memcpy(prog->insnsi + off, patch, sizeof(*patch)); 465 return prog; 466 } 467 468 insn_adj_cnt = prog->len + insn_delta; 469 470 /* Reject anything that would potentially let the insn->off 471 * target overflow when we have excessive program expansions. 472 * We need to probe here before we do any reallocation where 473 * we afterwards may not fail anymore. 474 */ 475 if (insn_adj_cnt > cnt_max && 476 (err = bpf_adj_branches(prog, off, off + 1, off + len, true))) 477 return ERR_PTR(err); 478 479 /* Several new instructions need to be inserted. Make room 480 * for them. Likely, there's no need for a new allocation as 481 * last page could have large enough tailroom. 482 */ 483 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), 484 GFP_USER); 485 if (!prog_adj) 486 return ERR_PTR(-ENOMEM); 487 488 prog_adj->len = insn_adj_cnt; 489 490 /* Patching happens in 3 steps: 491 * 492 * 1) Move over tail of insnsi from next instruction onwards, 493 * so we can patch the single target insn with one or more 494 * new ones (patching is always from 1 to n insns, n > 0). 495 * 2) Inject new instructions at the target location. 496 * 3) Adjust branch offsets if necessary. 497 */ 498 insn_rest = insn_adj_cnt - off - len; 499 500 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, 501 sizeof(*patch) * insn_rest); 502 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); 503 504 /* We are guaranteed to not fail at this point, otherwise 505 * the ship has sailed to reverse to the original state. An 506 * overflow cannot happen at this point. 
507 */ 508 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false)); 509 510 bpf_adj_linfo(prog_adj, off, insn_delta); 511 512 return prog_adj; 513 } 514 515 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) 516 { 517 int err; 518 519 /* Branch offsets can't overflow when program is shrinking, no need 520 * to call bpf_adj_branches(..., true) here 521 */ 522 memmove(prog->insnsi + off, prog->insnsi + off + cnt, 523 sizeof(struct bpf_insn) * (prog->len - off - cnt)); 524 prog->len -= cnt; 525 526 err = bpf_adj_branches(prog, off, off + cnt, off, false); 527 WARN_ON_ONCE(err); 528 return err; 529 } 530 531 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) 532 { 533 int i; 534 535 for (i = 0; i < fp->aux->real_func_cnt; i++) 536 bpf_prog_kallsyms_del(fp->aux->func[i]); 537 } 538 539 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) 540 { 541 bpf_prog_kallsyms_del_subprogs(fp); 542 bpf_prog_kallsyms_del(fp); 543 } 544 545 #ifdef CONFIG_BPF_JIT 546 /* All BPF JIT sysctl knobs here. */ 547 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 548 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 549 int bpf_jit_harden __read_mostly; 550 long bpf_jit_limit __read_mostly; 551 long bpf_jit_limit_max __read_mostly; 552 553 static void 554 bpf_prog_ksym_set_addr(struct bpf_prog *prog) 555 { 556 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); 557 558 prog->aux->ksym.start = (unsigned long) prog->bpf_func; 559 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len; 560 } 561 562 static void 563 bpf_prog_ksym_set_name(struct bpf_prog *prog) 564 { 565 char *sym = prog->aux->ksym.name; 566 const char *end = sym + KSYM_NAME_LEN; 567 const struct btf_type *type; 568 const char *func_name; 569 570 BUILD_BUG_ON(sizeof("bpf_prog_") + 571 sizeof(prog->tag) * 2 + 572 /* name has been null terminated. 573 * We should need +1 for the '_' preceding 574 * the name. However, the null character 575 * is double counted between the name and the 576 * sizeof("bpf_prog_") above, so we omit 577 * the +1 here. 578 */ 579 sizeof(prog->aux->name) > KSYM_NAME_LEN); 580 581 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); 582 sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); 583 584 /* prog->aux->name will be ignored if full btf name is available */ 585 if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) { 586 type = btf_type_by_id(prog->aux->btf, 587 prog->aux->func_info[prog->aux->func_idx].type_id); 588 func_name = btf_name_by_offset(prog->aux->btf, type->name_off); 589 snprintf(sym, (size_t)(end - sym), "_%s", func_name); 590 return; 591 } 592 593 if (prog->aux->name[0]) 594 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); 595 else 596 *sym = 0; 597 } 598 599 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n) 600 { 601 return container_of(n, struct bpf_ksym, tnode)->start; 602 } 603 604 static __always_inline bool bpf_tree_less(struct latch_tree_node *a, 605 struct latch_tree_node *b) 606 { 607 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b); 608 } 609 610 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) 611 { 612 unsigned long val = (unsigned long)key; 613 const struct bpf_ksym *ksym; 614 615 ksym = container_of(n, struct bpf_ksym, tnode); 616 617 if (val < ksym->start) 618 return -1; 619 /* Ensure that we detect return addresses as part of the program, when 620 * the final instruction is a call for a program part of the stack 621 * trace. 
Therefore, do val > ksym->end instead of val >= ksym->end. 622 */ 623 if (val > ksym->end) 624 return 1; 625 626 return 0; 627 } 628 629 static const struct latch_tree_ops bpf_tree_ops = { 630 .less = bpf_tree_less, 631 .comp = bpf_tree_comp, 632 }; 633 634 static DEFINE_SPINLOCK(bpf_lock); 635 static LIST_HEAD(bpf_kallsyms); 636 static struct latch_tree_root bpf_tree __cacheline_aligned; 637 638 void bpf_ksym_add(struct bpf_ksym *ksym) 639 { 640 spin_lock_bh(&bpf_lock); 641 WARN_ON_ONCE(!list_empty(&ksym->lnode)); 642 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms); 643 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 644 spin_unlock_bh(&bpf_lock); 645 } 646 647 static void __bpf_ksym_del(struct bpf_ksym *ksym) 648 { 649 if (list_empty(&ksym->lnode)) 650 return; 651 652 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 653 list_del_rcu(&ksym->lnode); 654 } 655 656 void bpf_ksym_del(struct bpf_ksym *ksym) 657 { 658 spin_lock_bh(&bpf_lock); 659 __bpf_ksym_del(ksym); 660 spin_unlock_bh(&bpf_lock); 661 } 662 663 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) 664 { 665 return fp->jited && !bpf_prog_was_classic(fp); 666 } 667 668 void bpf_prog_kallsyms_add(struct bpf_prog *fp) 669 { 670 if (!bpf_prog_kallsyms_candidate(fp) || 671 !bpf_token_capable(fp->aux->token, CAP_BPF)) 672 return; 673 674 bpf_prog_ksym_set_addr(fp); 675 bpf_prog_ksym_set_name(fp); 676 fp->aux->ksym.prog = true; 677 678 bpf_ksym_add(&fp->aux->ksym); 679 680 #ifdef CONFIG_FINEIBT 681 /* 682 * When FineIBT, code in the __cfi_foo() symbols can get executed 683 * and hence unwinder needs help. 684 */ 685 if (cfi_mode != CFI_FINEIBT) 686 return; 687 688 snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN, 689 "__cfi_%s", fp->aux->ksym.name); 690 691 fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16; 692 fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func; 693 694 bpf_ksym_add(&fp->aux->ksym_prefix); 695 #endif 696 } 697 698 void bpf_prog_kallsyms_del(struct bpf_prog *fp) 699 { 700 if (!bpf_prog_kallsyms_candidate(fp)) 701 return; 702 703 bpf_ksym_del(&fp->aux->ksym); 704 #ifdef CONFIG_FINEIBT 705 if (cfi_mode != CFI_FINEIBT) 706 return; 707 bpf_ksym_del(&fp->aux->ksym_prefix); 708 #endif 709 } 710 711 static struct bpf_ksym *bpf_ksym_find(unsigned long addr) 712 { 713 struct latch_tree_node *n; 714 715 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); 716 return n ? container_of(n, struct bpf_ksym, tnode) : NULL; 717 } 718 719 int bpf_address_lookup(unsigned long addr, unsigned long *size, 720 unsigned long *off, char *sym) 721 { 722 struct bpf_ksym *ksym; 723 int ret = 0; 724 725 rcu_read_lock(); 726 ksym = bpf_ksym_find(addr); 727 if (ksym) { 728 unsigned long symbol_start = ksym->start; 729 unsigned long symbol_end = ksym->end; 730 731 ret = strscpy(sym, ksym->name, KSYM_NAME_LEN); 732 733 if (size) 734 *size = symbol_end - symbol_start; 735 if (off) 736 *off = addr - symbol_start; 737 } 738 rcu_read_unlock(); 739 740 return ret; 741 } 742 743 bool is_bpf_text_address(unsigned long addr) 744 { 745 bool ret; 746 747 rcu_read_lock(); 748 ret = bpf_ksym_find(addr) != NULL; 749 rcu_read_unlock(); 750 751 return ret; 752 } 753 754 struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) 755 { 756 struct bpf_ksym *ksym; 757 758 WARN_ON_ONCE(!rcu_read_lock_held()); 759 ksym = bpf_ksym_find(addr); 760 761 return ksym && ksym->prog ? 
762 container_of(ksym, struct bpf_prog_aux, ksym)->prog : 763 NULL; 764 } 765 766 bool bpf_has_frame_pointer(unsigned long ip) 767 { 768 struct bpf_ksym *ksym; 769 unsigned long offset; 770 771 guard(rcu)(); 772 773 ksym = bpf_ksym_find(ip); 774 if (!ksym || !ksym->fp_start || !ksym->fp_end) 775 return false; 776 777 offset = ip - ksym->start; 778 779 return offset >= ksym->fp_start && offset < ksym->fp_end; 780 } 781 782 const struct exception_table_entry *search_bpf_extables(unsigned long addr) 783 { 784 const struct exception_table_entry *e = NULL; 785 struct bpf_prog *prog; 786 787 rcu_read_lock(); 788 prog = bpf_prog_ksym_find(addr); 789 if (!prog) 790 goto out; 791 if (!prog->aux->num_exentries) 792 goto out; 793 794 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr); 795 out: 796 rcu_read_unlock(); 797 return e; 798 } 799 800 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 801 char *sym) 802 { 803 struct bpf_ksym *ksym; 804 unsigned int it = 0; 805 int ret = -ERANGE; 806 807 if (!bpf_jit_kallsyms_enabled()) 808 return ret; 809 810 rcu_read_lock(); 811 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) { 812 if (it++ != symnum) 813 continue; 814 815 strscpy(sym, ksym->name, KSYM_NAME_LEN); 816 817 *value = ksym->start; 818 *type = BPF_SYM_ELF_TYPE; 819 820 ret = 0; 821 break; 822 } 823 rcu_read_unlock(); 824 825 return ret; 826 } 827 828 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, 829 struct bpf_jit_poke_descriptor *poke) 830 { 831 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 832 static const u32 poke_tab_max = 1024; 833 u32 slot = prog->aux->size_poke_tab; 834 u32 size = slot + 1; 835 836 if (size > poke_tab_max) 837 return -ENOSPC; 838 if (poke->tailcall_target || poke->tailcall_target_stable || 839 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr) 840 return -EINVAL; 841 842 switch (poke->reason) { 843 case BPF_POKE_REASON_TAIL_CALL: 844 if (!poke->tail_call.map) 845 return -EINVAL; 846 break; 847 default: 848 return -EINVAL; 849 } 850 851 tab = krealloc_array(tab, size, sizeof(*poke), GFP_KERNEL); 852 if (!tab) 853 return -ENOMEM; 854 855 memcpy(&tab[slot], poke, sizeof(*poke)); 856 prog->aux->size_poke_tab = size; 857 prog->aux->poke_tab = tab; 858 859 return slot; 860 } 861 862 /* 863 * BPF program pack allocator. 864 * 865 * Most BPF programs are pretty small. Allocating a hole page for each 866 * program is sometime a waste. Many small bpf program also adds pressure 867 * to instruction TLB. To solve this issue, we introduce a BPF program pack 868 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86) 869 * to host BPF programs. 870 */ 871 #define BPF_PROG_CHUNK_SHIFT 6 872 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT) 873 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1)) 874 875 struct bpf_prog_pack { 876 struct list_head list; 877 void *ptr; 878 unsigned long bitmap[]; 879 }; 880 881 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size) 882 { 883 memset(area, 0, size); 884 } 885 886 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE) 887 888 static DEFINE_MUTEX(pack_mutex); 889 static LIST_HEAD(pack_list); 890 891 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with 892 * CONFIG_MMU=n. Use PAGE_SIZE in these cases. 893 */ 894 #ifdef PMD_SIZE 895 /* PMD_SIZE is really big for some archs. It doesn't make sense to 896 * reserve too much memory in one allocation. 
Hardcode BPF_PROG_PACK_SIZE to 897 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be 898 * greater than or equal to 2MB. 899 */ 900 #define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes()) 901 #else 902 #define BPF_PROG_PACK_SIZE PAGE_SIZE 903 #endif 904 905 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE) 906 907 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns) 908 { 909 struct bpf_prog_pack *pack; 910 int err; 911 912 pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)); 913 if (!pack) 914 return NULL; 915 pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE); 916 if (!pack->ptr) 917 goto out; 918 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE); 919 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE); 920 921 set_vm_flush_reset_perms(pack->ptr); 922 err = set_memory_rox((unsigned long)pack->ptr, 923 BPF_PROG_PACK_SIZE / PAGE_SIZE); 924 if (err) 925 goto out; 926 list_add_tail(&pack->list, &pack_list); 927 return pack; 928 929 out: 930 bpf_jit_free_exec(pack->ptr); 931 kfree(pack); 932 return NULL; 933 } 934 935 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns) 936 { 937 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size); 938 struct bpf_prog_pack *pack; 939 unsigned long pos; 940 void *ptr = NULL; 941 942 mutex_lock(&pack_mutex); 943 if (size > BPF_PROG_PACK_SIZE) { 944 size = round_up(size, PAGE_SIZE); 945 ptr = bpf_jit_alloc_exec(size); 946 if (ptr) { 947 int err; 948 949 bpf_fill_ill_insns(ptr, size); 950 set_vm_flush_reset_perms(ptr); 951 err = set_memory_rox((unsigned long)ptr, 952 size / PAGE_SIZE); 953 if (err) { 954 bpf_jit_free_exec(ptr); 955 ptr = NULL; 956 } 957 } 958 goto out; 959 } 960 list_for_each_entry(pack, &pack_list, list) { 961 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, 962 nbits, 0); 963 if (pos < BPF_PROG_CHUNK_COUNT) 964 goto found_free_area; 965 } 966 967 pack = alloc_new_pack(bpf_fill_ill_insns); 968 if (!pack) 969 goto out; 970 971 pos = 0; 972 973 found_free_area: 974 bitmap_set(pack->bitmap, pos, nbits); 975 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT); 976 977 out: 978 mutex_unlock(&pack_mutex); 979 return ptr; 980 } 981 982 void bpf_prog_pack_free(void *ptr, u32 size) 983 { 984 struct bpf_prog_pack *pack = NULL, *tmp; 985 unsigned int nbits; 986 unsigned long pos; 987 988 mutex_lock(&pack_mutex); 989 if (size > BPF_PROG_PACK_SIZE) { 990 bpf_jit_free_exec(ptr); 991 goto out; 992 } 993 994 list_for_each_entry(tmp, &pack_list, list) { 995 if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) { 996 pack = tmp; 997 break; 998 } 999 } 1000 1001 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n")) 1002 goto out; 1003 1004 nbits = BPF_PROG_SIZE_TO_NBITS(size); 1005 pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT; 1006 1007 WARN_ONCE(bpf_arch_text_invalidate(ptr, size), 1008 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n"); 1009 1010 bitmap_clear(pack->bitmap, pos, nbits); 1011 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, 1012 BPF_PROG_CHUNK_COUNT, 0) == 0) { 1013 list_del(&pack->list); 1014 bpf_jit_free_exec(pack->ptr); 1015 kfree(pack); 1016 } 1017 out: 1018 mutex_unlock(&pack_mutex); 1019 } 1020 1021 static atomic_long_t bpf_jit_current; 1022 1023 /* Can be overridden by an arch's JIT compiler if it has a custom, 1024 * dedicated BPF backend memory area, or if neither of the two 1025 * below apply. 
1026 */ 1027 u64 __weak bpf_jit_alloc_exec_limit(void) 1028 { 1029 #if defined(MODULES_VADDR) 1030 return MODULES_END - MODULES_VADDR; 1031 #else 1032 return VMALLOC_END - VMALLOC_START; 1033 #endif 1034 } 1035 1036 static int __init bpf_jit_charge_init(void) 1037 { 1038 /* Only used as heuristic here to derive limit. */ 1039 bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); 1040 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1, 1041 PAGE_SIZE), LONG_MAX); 1042 return 0; 1043 } 1044 pure_initcall(bpf_jit_charge_init); 1045 1046 int bpf_jit_charge_modmem(u32 size) 1047 { 1048 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) { 1049 if (!bpf_capable()) { 1050 atomic_long_sub(size, &bpf_jit_current); 1051 return -EPERM; 1052 } 1053 } 1054 1055 return 0; 1056 } 1057 1058 void bpf_jit_uncharge_modmem(u32 size) 1059 { 1060 atomic_long_sub(size, &bpf_jit_current); 1061 } 1062 1063 void *__weak bpf_jit_alloc_exec(unsigned long size) 1064 { 1065 return execmem_alloc(EXECMEM_BPF, size); 1066 } 1067 1068 void __weak bpf_jit_free_exec(void *addr) 1069 { 1070 execmem_free(addr); 1071 } 1072 1073 struct bpf_binary_header * 1074 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, 1075 unsigned int alignment, 1076 bpf_jit_fill_hole_t bpf_fill_ill_insns) 1077 { 1078 struct bpf_binary_header *hdr; 1079 u32 size, hole, start; 1080 1081 WARN_ON_ONCE(!is_power_of_2(alignment) || 1082 alignment > BPF_IMAGE_ALIGNMENT); 1083 1084 /* Most of BPF filters are really small, but if some of them 1085 * fill a page, allow at least 128 extra bytes to insert a 1086 * random section of illegal instructions. 1087 */ 1088 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); 1089 1090 if (bpf_jit_charge_modmem(size)) 1091 return NULL; 1092 hdr = bpf_jit_alloc_exec(size); 1093 if (!hdr) { 1094 bpf_jit_uncharge_modmem(size); 1095 return NULL; 1096 } 1097 1098 /* Fill space with illegal/arch-dep instructions. */ 1099 bpf_fill_ill_insns(hdr, size); 1100 1101 hdr->size = size; 1102 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), 1103 PAGE_SIZE - sizeof(*hdr)); 1104 start = get_random_u32_below(hole) & ~(alignment - 1); 1105 1106 /* Leave a random number of instructions before BPF code. */ 1107 *image_ptr = &hdr->image[start]; 1108 1109 return hdr; 1110 } 1111 1112 void bpf_jit_binary_free(struct bpf_binary_header *hdr) 1113 { 1114 u32 size = hdr->size; 1115 1116 bpf_jit_free_exec(hdr); 1117 bpf_jit_uncharge_modmem(size); 1118 } 1119 1120 /* Allocate jit binary from bpf_prog_pack allocator. 1121 * Since the allocated memory is RO+X, the JIT engine cannot write directly 1122 * to the memory. To solve this problem, a RW buffer is also allocated at 1123 * as the same time. The JIT engine should calculate offsets based on the 1124 * RO memory address, but write JITed program to the RW buffer. Once the 1125 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies 1126 * the JITed program to the RO memory. 
1127 */ 1128 struct bpf_binary_header * 1129 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr, 1130 unsigned int alignment, 1131 struct bpf_binary_header **rw_header, 1132 u8 **rw_image, 1133 bpf_jit_fill_hole_t bpf_fill_ill_insns) 1134 { 1135 struct bpf_binary_header *ro_header; 1136 u32 size, hole, start; 1137 1138 WARN_ON_ONCE(!is_power_of_2(alignment) || 1139 alignment > BPF_IMAGE_ALIGNMENT); 1140 1141 /* add 16 bytes for a random section of illegal instructions */ 1142 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE); 1143 1144 if (bpf_jit_charge_modmem(size)) 1145 return NULL; 1146 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns); 1147 if (!ro_header) { 1148 bpf_jit_uncharge_modmem(size); 1149 return NULL; 1150 } 1151 1152 *rw_header = kvmalloc(size, GFP_KERNEL); 1153 if (!*rw_header) { 1154 bpf_prog_pack_free(ro_header, size); 1155 bpf_jit_uncharge_modmem(size); 1156 return NULL; 1157 } 1158 1159 /* Fill space with illegal/arch-dep instructions. */ 1160 bpf_fill_ill_insns(*rw_header, size); 1161 (*rw_header)->size = size; 1162 1163 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)), 1164 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header)); 1165 start = get_random_u32_below(hole) & ~(alignment - 1); 1166 1167 *image_ptr = &ro_header->image[start]; 1168 *rw_image = &(*rw_header)->image[start]; 1169 1170 return ro_header; 1171 } 1172 1173 /* Copy JITed text from rw_header to its final location, the ro_header. */ 1174 int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header, 1175 struct bpf_binary_header *rw_header) 1176 { 1177 void *ptr; 1178 1179 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size); 1180 1181 kvfree(rw_header); 1182 1183 if (IS_ERR(ptr)) { 1184 bpf_prog_pack_free(ro_header, ro_header->size); 1185 return PTR_ERR(ptr); 1186 } 1187 return 0; 1188 } 1189 1190 /* bpf_jit_binary_pack_free is called in two different scenarios: 1191 * 1) when the program is freed after; 1192 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize). 1193 * For case 2), we need to free both the RO memory and the RW buffer. 1194 * 1195 * bpf_jit_binary_pack_free requires proper ro_header->size. However, 1196 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size 1197 * must be set with either bpf_jit_binary_pack_finalize (normal path) or 1198 * bpf_arch_text_copy (when jit fails). 1199 */ 1200 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, 1201 struct bpf_binary_header *rw_header) 1202 { 1203 u32 size = ro_header->size; 1204 1205 bpf_prog_pack_free(ro_header, size); 1206 kvfree(rw_header); 1207 bpf_jit_uncharge_modmem(size); 1208 } 1209 1210 struct bpf_binary_header * 1211 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp) 1212 { 1213 unsigned long real_start = (unsigned long)fp->bpf_func; 1214 unsigned long addr; 1215 1216 addr = real_start & BPF_PROG_CHUNK_MASK; 1217 return (void *)addr; 1218 } 1219 1220 static inline struct bpf_binary_header * 1221 bpf_jit_binary_hdr(const struct bpf_prog *fp) 1222 { 1223 unsigned long real_start = (unsigned long)fp->bpf_func; 1224 unsigned long addr; 1225 1226 addr = real_start & PAGE_MASK; 1227 return (void *)addr; 1228 } 1229 1230 /* This symbol is only overridden by archs that have different 1231 * requirements than the usual eBPF JITs, f.e. when they only 1232 * implement cBPF JIT, do not set images read-only, etc. 
1233 */ 1234 void __weak bpf_jit_free(struct bpf_prog *fp) 1235 { 1236 if (fp->jited) { 1237 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); 1238 1239 bpf_jit_binary_free(hdr); 1240 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); 1241 } 1242 1243 bpf_prog_unlock_free(fp); 1244 } 1245 1246 int bpf_jit_get_func_addr(const struct bpf_prog *prog, 1247 const struct bpf_insn *insn, bool extra_pass, 1248 u64 *func_addr, bool *func_addr_fixed) 1249 { 1250 s16 off = insn->off; 1251 s32 imm = insn->imm; 1252 u8 *addr; 1253 int err; 1254 1255 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL; 1256 if (!*func_addr_fixed) { 1257 /* Place-holder address till the last pass has collected 1258 * all addresses for JITed subprograms in which case we 1259 * can pick them up from prog->aux. 1260 */ 1261 if (!extra_pass) 1262 addr = NULL; 1263 else if (prog->aux->func && 1264 off >= 0 && off < prog->aux->real_func_cnt) 1265 addr = (u8 *)prog->aux->func[off]->bpf_func; 1266 else 1267 return -EINVAL; 1268 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && 1269 bpf_jit_supports_far_kfunc_call()) { 1270 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr); 1271 if (err) 1272 return err; 1273 } else { 1274 /* Address of a BPF helper call. Since part of the core 1275 * kernel, it's always at a fixed location. __bpf_call_base 1276 * and the helper with imm relative to it are both in core 1277 * kernel. 1278 */ 1279 addr = (u8 *)__bpf_call_base + imm; 1280 } 1281 1282 *func_addr = (unsigned long)addr; 1283 return 0; 1284 } 1285 1286 const char *bpf_jit_get_prog_name(struct bpf_prog *prog) 1287 { 1288 if (prog->aux->ksym.prog) 1289 return prog->aux->ksym.name; 1290 return prog->aux->name; 1291 } 1292 1293 static int bpf_jit_blind_insn(const struct bpf_insn *from, 1294 const struct bpf_insn *aux, 1295 struct bpf_insn *to_buff, 1296 bool emit_zext) 1297 { 1298 struct bpf_insn *to = to_buff; 1299 u32 imm_rnd = get_random_u32(); 1300 s16 off; 1301 1302 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); 1303 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); 1304 1305 /* Constraints on AX register: 1306 * 1307 * AX register is inaccessible from user space. It is mapped in 1308 * all JITs, and used here for constant blinding rewrites. It is 1309 * typically "stateless" meaning its contents are only valid within 1310 * the executed instruction, but not across several instructions. 1311 * There are a few exceptions however which are further detailed 1312 * below. 1313 * 1314 * Constant blinding is only used by JITs, not in the interpreter. 1315 * The interpreter uses AX in some occasions as a local temporary 1316 * register e.g. in DIV or MOD instructions. 1317 * 1318 * In restricted circumstances, the verifier can also use the AX 1319 * register for rewrites as long as they do not interfere with 1320 * the above cases! 
1321 */ 1322 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) 1323 goto out; 1324 1325 if (from->imm == 0 && 1326 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || 1327 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { 1328 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); 1329 goto out; 1330 } 1331 1332 switch (from->code) { 1333 case BPF_ALU | BPF_ADD | BPF_K: 1334 case BPF_ALU | BPF_SUB | BPF_K: 1335 case BPF_ALU | BPF_AND | BPF_K: 1336 case BPF_ALU | BPF_OR | BPF_K: 1337 case BPF_ALU | BPF_XOR | BPF_K: 1338 case BPF_ALU | BPF_MUL | BPF_K: 1339 case BPF_ALU | BPF_MOV | BPF_K: 1340 case BPF_ALU | BPF_DIV | BPF_K: 1341 case BPF_ALU | BPF_MOD | BPF_K: 1342 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1343 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1344 *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off); 1345 break; 1346 1347 case BPF_ALU64 | BPF_ADD | BPF_K: 1348 case BPF_ALU64 | BPF_SUB | BPF_K: 1349 case BPF_ALU64 | BPF_AND | BPF_K: 1350 case BPF_ALU64 | BPF_OR | BPF_K: 1351 case BPF_ALU64 | BPF_XOR | BPF_K: 1352 case BPF_ALU64 | BPF_MUL | BPF_K: 1353 case BPF_ALU64 | BPF_MOV | BPF_K: 1354 case BPF_ALU64 | BPF_DIV | BPF_K: 1355 case BPF_ALU64 | BPF_MOD | BPF_K: 1356 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1357 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1358 *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off); 1359 break; 1360 1361 case BPF_JMP | BPF_JEQ | BPF_K: 1362 case BPF_JMP | BPF_JNE | BPF_K: 1363 case BPF_JMP | BPF_JGT | BPF_K: 1364 case BPF_JMP | BPF_JLT | BPF_K: 1365 case BPF_JMP | BPF_JGE | BPF_K: 1366 case BPF_JMP | BPF_JLE | BPF_K: 1367 case BPF_JMP | BPF_JSGT | BPF_K: 1368 case BPF_JMP | BPF_JSLT | BPF_K: 1369 case BPF_JMP | BPF_JSGE | BPF_K: 1370 case BPF_JMP | BPF_JSLE | BPF_K: 1371 case BPF_JMP | BPF_JSET | BPF_K: 1372 /* Accommodate for extra offset in case of a backjump. */ 1373 off = from->off; 1374 if (off < 0) 1375 off -= 2; 1376 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1377 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1378 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); 1379 break; 1380 1381 case BPF_JMP32 | BPF_JEQ | BPF_K: 1382 case BPF_JMP32 | BPF_JNE | BPF_K: 1383 case BPF_JMP32 | BPF_JGT | BPF_K: 1384 case BPF_JMP32 | BPF_JLT | BPF_K: 1385 case BPF_JMP32 | BPF_JGE | BPF_K: 1386 case BPF_JMP32 | BPF_JLE | BPF_K: 1387 case BPF_JMP32 | BPF_JSGT | BPF_K: 1388 case BPF_JMP32 | BPF_JSLT | BPF_K: 1389 case BPF_JMP32 | BPF_JSGE | BPF_K: 1390 case BPF_JMP32 | BPF_JSLE | BPF_K: 1391 case BPF_JMP32 | BPF_JSET | BPF_K: 1392 /* Accommodate for extra offset in case of a backjump. */ 1393 off = from->off; 1394 if (off < 0) 1395 off -= 2; 1396 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1397 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1398 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, 1399 off); 1400 break; 1401 1402 case BPF_LD | BPF_IMM | BPF_DW: 1403 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); 1404 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1405 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 1406 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX); 1407 break; 1408 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. 
*/ 1409 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1410 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1411 if (emit_zext) 1412 *to++ = BPF_ZEXT_REG(BPF_REG_AX); 1413 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1414 break; 1415 1416 case BPF_ST | BPF_MEM | BPF_DW: 1417 case BPF_ST | BPF_MEM | BPF_W: 1418 case BPF_ST | BPF_MEM | BPF_H: 1419 case BPF_ST | BPF_MEM | BPF_B: 1420 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1421 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1422 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); 1423 break; 1424 1425 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: 1426 case BPF_ST | BPF_PROBE_MEM32 | BPF_W: 1427 case BPF_ST | BPF_PROBE_MEM32 | BPF_H: 1428 case BPF_ST | BPF_PROBE_MEM32 | BPF_B: 1429 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 1430 from->imm); 1431 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1432 /* 1433 * Cannot use BPF_STX_MEM() macro here as it 1434 * hardcodes BPF_MEM mode, losing PROBE_MEM32 1435 * and breaking arena addressing in the JIT. 1436 */ 1437 *to++ = (struct bpf_insn) { 1438 .code = BPF_STX | BPF_PROBE_MEM32 | 1439 BPF_SIZE(from->code), 1440 .dst_reg = from->dst_reg, 1441 .src_reg = BPF_REG_AX, 1442 .off = from->off, 1443 }; 1444 break; 1445 } 1446 out: 1447 return to - to_buff; 1448 } 1449 1450 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, 1451 gfp_t gfp_extra_flags) 1452 { 1453 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 1454 struct bpf_prog *fp; 1455 1456 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); 1457 if (fp != NULL) { 1458 /* aux->prog still points to the fp_other one, so 1459 * when promoting the clone to the real program, 1460 * this still needs to be adapted. 1461 */ 1462 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); 1463 } 1464 1465 return fp; 1466 } 1467 1468 static void bpf_prog_clone_free(struct bpf_prog *fp) 1469 { 1470 /* aux was stolen by the other clone, so we cannot free 1471 * it from this path! It will be freed eventually by the 1472 * other program on release. 1473 * 1474 * At this point, we don't need a deferred release since 1475 * clone is guaranteed to not be locked. 1476 */ 1477 fp->aux = NULL; 1478 fp->stats = NULL; 1479 fp->active = NULL; 1480 __bpf_prog_free(fp); 1481 } 1482 1483 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) 1484 { 1485 /* We have to repoint aux->prog to self, as we don't 1486 * know whether fp here is the clone or the original. 1487 */ 1488 fp->aux->prog = fp; 1489 if (fp->aux->offload) 1490 fp->aux->offload->prog = fp; 1491 bpf_prog_clone_free(fp_other); 1492 } 1493 1494 /* 1495 * Now this function is used only to blind the main prog and must be invoked only when 1496 * bpf_prog_need_blind() returns true. 
1497 */ 1498 struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog) 1499 { 1500 struct bpf_insn insn_buff[16], aux[2]; 1501 struct bpf_prog *clone, *tmp; 1502 int insn_delta, insn_cnt; 1503 struct bpf_insn *insn; 1504 int i, rewritten; 1505 1506 if (WARN_ON_ONCE(env && env->prog != prog)) 1507 return ERR_PTR(-EINVAL); 1508 1509 clone = bpf_prog_clone_create(prog, GFP_USER); 1510 if (!clone) 1511 return ERR_PTR(-ENOMEM); 1512 1513 /* make sure bpf_patch_insn_data() patches the correct prog */ 1514 if (env) 1515 env->prog = clone; 1516 1517 insn_cnt = clone->len; 1518 insn = clone->insnsi; 1519 1520 for (i = 0; i < insn_cnt; i++, insn++) { 1521 if (bpf_pseudo_func(insn)) { 1522 /* ld_imm64 with an address of bpf subprog is not 1523 * a user controlled constant. Don't randomize it, 1524 * since it will conflict with jit_subprogs() logic. 1525 */ 1526 insn++; 1527 i++; 1528 continue; 1529 } 1530 1531 /* We temporarily need to hold the original ld64 insn 1532 * so that we can still access the first part in the 1533 * second blinding run. 1534 */ 1535 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) && 1536 insn[1].code == 0) 1537 memcpy(aux, insn, sizeof(aux)); 1538 1539 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, 1540 clone->aux->verifier_zext); 1541 if (!rewritten) 1542 continue; 1543 1544 if (env) 1545 tmp = bpf_patch_insn_data(env, i, insn_buff, rewritten); 1546 else 1547 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); 1548 1549 if (IS_ERR_OR_NULL(tmp)) { 1550 if (env) 1551 /* restore the original prog */ 1552 env->prog = prog; 1553 /* Patching may have repointed aux->prog during 1554 * realloc from the original one, so we need to 1555 * fix it up here on error. 1556 */ 1557 bpf_jit_prog_release_other(prog, clone); 1558 return IS_ERR(tmp) ? tmp : ERR_PTR(-ENOMEM); 1559 } 1560 1561 clone = tmp; 1562 insn_delta = rewritten - 1; 1563 1564 if (env) 1565 env->prog = clone; 1566 1567 /* Walk new program and skip insns we just inserted. */ 1568 insn = clone->insnsi + i + insn_delta; 1569 insn_cnt += insn_delta; 1570 i += insn_delta; 1571 } 1572 1573 clone->blinded = 1; 1574 return clone; 1575 } 1576 #endif /* CONFIG_BPF_JIT */ 1577 1578 /* Base function for offset calculation. Needs to go into .text section, 1579 * therefore keeping it non-static as well; will also be used by JITs 1580 * anyway later on, so do not let the compiler omit it. This also needs 1581 * to go into kallsyms for correlation from e.g. bpftool, so naming 1582 * must not change. 1583 */ 1584 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 1585 { 1586 return 0; 1587 } 1588 EXPORT_SYMBOL_GPL(__bpf_call_base); 1589 1590 /* All UAPI available opcodes. */ 1591 #define BPF_INSN_MAP(INSN_2, INSN_3) \ 1592 /* 32 bit ALU operations. */ \ 1593 /* Register based. */ \ 1594 INSN_3(ALU, ADD, X), \ 1595 INSN_3(ALU, SUB, X), \ 1596 INSN_3(ALU, AND, X), \ 1597 INSN_3(ALU, OR, X), \ 1598 INSN_3(ALU, LSH, X), \ 1599 INSN_3(ALU, RSH, X), \ 1600 INSN_3(ALU, XOR, X), \ 1601 INSN_3(ALU, MUL, X), \ 1602 INSN_3(ALU, MOV, X), \ 1603 INSN_3(ALU, ARSH, X), \ 1604 INSN_3(ALU, DIV, X), \ 1605 INSN_3(ALU, MOD, X), \ 1606 INSN_2(ALU, NEG), \ 1607 INSN_3(ALU, END, TO_BE), \ 1608 INSN_3(ALU, END, TO_LE), \ 1609 /* Immediate based. 
*/ \ 1610 INSN_3(ALU, ADD, K), \ 1611 INSN_3(ALU, SUB, K), \ 1612 INSN_3(ALU, AND, K), \ 1613 INSN_3(ALU, OR, K), \ 1614 INSN_3(ALU, LSH, K), \ 1615 INSN_3(ALU, RSH, K), \ 1616 INSN_3(ALU, XOR, K), \ 1617 INSN_3(ALU, MUL, K), \ 1618 INSN_3(ALU, MOV, K), \ 1619 INSN_3(ALU, ARSH, K), \ 1620 INSN_3(ALU, DIV, K), \ 1621 INSN_3(ALU, MOD, K), \ 1622 /* 64 bit ALU operations. */ \ 1623 /* Register based. */ \ 1624 INSN_3(ALU64, ADD, X), \ 1625 INSN_3(ALU64, SUB, X), \ 1626 INSN_3(ALU64, AND, X), \ 1627 INSN_3(ALU64, OR, X), \ 1628 INSN_3(ALU64, LSH, X), \ 1629 INSN_3(ALU64, RSH, X), \ 1630 INSN_3(ALU64, XOR, X), \ 1631 INSN_3(ALU64, MUL, X), \ 1632 INSN_3(ALU64, MOV, X), \ 1633 INSN_3(ALU64, ARSH, X), \ 1634 INSN_3(ALU64, DIV, X), \ 1635 INSN_3(ALU64, MOD, X), \ 1636 INSN_2(ALU64, NEG), \ 1637 INSN_3(ALU64, END, TO_LE), \ 1638 /* Immediate based. */ \ 1639 INSN_3(ALU64, ADD, K), \ 1640 INSN_3(ALU64, SUB, K), \ 1641 INSN_3(ALU64, AND, K), \ 1642 INSN_3(ALU64, OR, K), \ 1643 INSN_3(ALU64, LSH, K), \ 1644 INSN_3(ALU64, RSH, K), \ 1645 INSN_3(ALU64, XOR, K), \ 1646 INSN_3(ALU64, MUL, K), \ 1647 INSN_3(ALU64, MOV, K), \ 1648 INSN_3(ALU64, ARSH, K), \ 1649 INSN_3(ALU64, DIV, K), \ 1650 INSN_3(ALU64, MOD, K), \ 1651 /* Call instruction. */ \ 1652 INSN_2(JMP, CALL), \ 1653 /* Exit instruction. */ \ 1654 INSN_2(JMP, EXIT), \ 1655 /* 32-bit Jump instructions. */ \ 1656 /* Register based. */ \ 1657 INSN_3(JMP32, JEQ, X), \ 1658 INSN_3(JMP32, JNE, X), \ 1659 INSN_3(JMP32, JGT, X), \ 1660 INSN_3(JMP32, JLT, X), \ 1661 INSN_3(JMP32, JGE, X), \ 1662 INSN_3(JMP32, JLE, X), \ 1663 INSN_3(JMP32, JSGT, X), \ 1664 INSN_3(JMP32, JSLT, X), \ 1665 INSN_3(JMP32, JSGE, X), \ 1666 INSN_3(JMP32, JSLE, X), \ 1667 INSN_3(JMP32, JSET, X), \ 1668 /* Immediate based. */ \ 1669 INSN_3(JMP32, JEQ, K), \ 1670 INSN_3(JMP32, JNE, K), \ 1671 INSN_3(JMP32, JGT, K), \ 1672 INSN_3(JMP32, JLT, K), \ 1673 INSN_3(JMP32, JGE, K), \ 1674 INSN_3(JMP32, JLE, K), \ 1675 INSN_3(JMP32, JSGT, K), \ 1676 INSN_3(JMP32, JSLT, K), \ 1677 INSN_3(JMP32, JSGE, K), \ 1678 INSN_3(JMP32, JSLE, K), \ 1679 INSN_3(JMP32, JSET, K), \ 1680 /* Jump instructions. */ \ 1681 /* Register based. */ \ 1682 INSN_3(JMP, JEQ, X), \ 1683 INSN_3(JMP, JNE, X), \ 1684 INSN_3(JMP, JGT, X), \ 1685 INSN_3(JMP, JLT, X), \ 1686 INSN_3(JMP, JGE, X), \ 1687 INSN_3(JMP, JLE, X), \ 1688 INSN_3(JMP, JSGT, X), \ 1689 INSN_3(JMP, JSLT, X), \ 1690 INSN_3(JMP, JSGE, X), \ 1691 INSN_3(JMP, JSLE, X), \ 1692 INSN_3(JMP, JSET, X), \ 1693 /* Immediate based. */ \ 1694 INSN_3(JMP, JEQ, K), \ 1695 INSN_3(JMP, JNE, K), \ 1696 INSN_3(JMP, JGT, K), \ 1697 INSN_3(JMP, JLT, K), \ 1698 INSN_3(JMP, JGE, K), \ 1699 INSN_3(JMP, JLE, K), \ 1700 INSN_3(JMP, JSGT, K), \ 1701 INSN_3(JMP, JSLT, K), \ 1702 INSN_3(JMP, JSGE, K), \ 1703 INSN_3(JMP, JSLE, K), \ 1704 INSN_3(JMP, JSET, K), \ 1705 INSN_2(JMP, JA), \ 1706 INSN_2(JMP32, JA), \ 1707 /* Atomic operations. */ \ 1708 INSN_3(STX, ATOMIC, B), \ 1709 INSN_3(STX, ATOMIC, H), \ 1710 INSN_3(STX, ATOMIC, W), \ 1711 INSN_3(STX, ATOMIC, DW), \ 1712 /* Store instructions. */ \ 1713 /* Register based. */ \ 1714 INSN_3(STX, MEM, B), \ 1715 INSN_3(STX, MEM, H), \ 1716 INSN_3(STX, MEM, W), \ 1717 INSN_3(STX, MEM, DW), \ 1718 /* Immediate based. */ \ 1719 INSN_3(ST, MEM, B), \ 1720 INSN_3(ST, MEM, H), \ 1721 INSN_3(ST, MEM, W), \ 1722 INSN_3(ST, MEM, DW), \ 1723 /* Load instructions. */ \ 1724 /* Register based. 
*/ \ 1725 INSN_3(LDX, MEM, B), \ 1726 INSN_3(LDX, MEM, H), \ 1727 INSN_3(LDX, MEM, W), \ 1728 INSN_3(LDX, MEM, DW), \ 1729 INSN_3(LDX, MEMSX, B), \ 1730 INSN_3(LDX, MEMSX, H), \ 1731 INSN_3(LDX, MEMSX, W), \ 1732 /* Immediate based. */ \ 1733 INSN_3(LD, IMM, DW) 1734 1735 bool bpf_opcode_in_insntable(u8 code) 1736 { 1737 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true 1738 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true 1739 static const bool public_insntable[256] = { 1740 [0 ... 255] = false, 1741 /* Now overwrite non-defaults ... */ 1742 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL), 1743 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */ 1744 [BPF_LD | BPF_ABS | BPF_B] = true, 1745 [BPF_LD | BPF_ABS | BPF_H] = true, 1746 [BPF_LD | BPF_ABS | BPF_W] = true, 1747 [BPF_LD | BPF_IND | BPF_B] = true, 1748 [BPF_LD | BPF_IND | BPF_H] = true, 1749 [BPF_LD | BPF_IND | BPF_W] = true, 1750 [BPF_JMP | BPF_JA | BPF_X] = true, 1751 [BPF_JMP | BPF_JCOND] = true, 1752 }; 1753 #undef BPF_INSN_3_TBL 1754 #undef BPF_INSN_2_TBL 1755 return public_insntable[code]; 1756 } 1757 1758 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1759 /* Absolute value of s32 without undefined behavior for S32_MIN */ 1760 static u32 abs_s32(s32 x) 1761 { 1762 return x >= 0 ? (u32)x : -(u32)x; 1763 } 1764 1765 /** 1766 * ___bpf_prog_run - run eBPF program on a given context 1767 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers 1768 * @insn: is the array of eBPF instructions 1769 * 1770 * Decode and execute eBPF instructions. 1771 * 1772 * Return: whatever value is in %BPF_R0 at program exit 1773 */ 1774 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn) 1775 { 1776 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y 1777 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z 1778 static const void * const jumptable[256] __annotate_jump_table = { 1779 [0 ... 255] = &&default_label, 1780 /* Now overwrite non-defaults ... */ 1781 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL), 1782 /* Non-UAPI available opcodes. */ 1783 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, 1784 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, 1785 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC, 1786 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B, 1787 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H, 1788 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W, 1789 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW, 1790 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B, 1791 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H, 1792 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W, 1793 }; 1794 #undef BPF_INSN_3_LBL 1795 #undef BPF_INSN_2_LBL 1796 u32 tail_call_cnt = 0; 1797 1798 #define CONT ({ insn++; goto select_insn; }) 1799 #define CONT_JMP ({ insn++; goto select_insn; }) 1800 1801 select_insn: 1802 goto *jumptable[insn->code]; 1803 1804 /* Explicitly mask the register-based shift amounts with 63 or 31 1805 * to avoid undefined behavior. Normally this won't affect the 1806 * generated code, for example, in case of native 64 bit archs such 1807 * as x86-64 or arm64, the compiler is optimizing the AND away for 1808 * the interpreter. In case of JITs, each of the JIT backends compiles 1809 * the BPF shift operations to machine instructions which produce 1810 * implementation-defined results in such a case; the resulting 1811 * contents of the register may be arbitrary, but program behaviour 1812 * as a whole remains defined. 
In other words, in case of JIT backends, 1813 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation. 1814 */ 1815 /* ALU (shifts) */ 1816 #define SHT(OPCODE, OP) \ 1817 ALU64_##OPCODE##_X: \ 1818 DST = DST OP (SRC & 63); \ 1819 CONT; \ 1820 ALU_##OPCODE##_X: \ 1821 DST = (u32) DST OP ((u32) SRC & 31); \ 1822 CONT; \ 1823 ALU64_##OPCODE##_K: \ 1824 DST = DST OP IMM; \ 1825 CONT; \ 1826 ALU_##OPCODE##_K: \ 1827 DST = (u32) DST OP (u32) IMM; \ 1828 CONT; 1829 /* ALU (rest) */ 1830 #define ALU(OPCODE, OP) \ 1831 ALU64_##OPCODE##_X: \ 1832 DST = DST OP SRC; \ 1833 CONT; \ 1834 ALU_##OPCODE##_X: \ 1835 DST = (u32) DST OP (u32) SRC; \ 1836 CONT; \ 1837 ALU64_##OPCODE##_K: \ 1838 DST = DST OP IMM; \ 1839 CONT; \ 1840 ALU_##OPCODE##_K: \ 1841 DST = (u32) DST OP (u32) IMM; \ 1842 CONT; 1843 ALU(ADD, +) 1844 ALU(SUB, -) 1845 ALU(AND, &) 1846 ALU(OR, |) 1847 ALU(XOR, ^) 1848 ALU(MUL, *) 1849 SHT(LSH, <<) 1850 SHT(RSH, >>) 1851 #undef SHT 1852 #undef ALU 1853 ALU_NEG: 1854 DST = (u32) -DST; 1855 CONT; 1856 ALU64_NEG: 1857 DST = -DST; 1858 CONT; 1859 ALU_MOV_X: 1860 switch (OFF) { 1861 case 0: 1862 DST = (u32) SRC; 1863 break; 1864 case 8: 1865 DST = (u32)(s8) SRC; 1866 break; 1867 case 16: 1868 DST = (u32)(s16) SRC; 1869 break; 1870 } 1871 CONT; 1872 ALU_MOV_K: 1873 DST = (u32) IMM; 1874 CONT; 1875 ALU64_MOV_X: 1876 switch (OFF) { 1877 case 0: 1878 DST = SRC; 1879 break; 1880 case 8: 1881 DST = (s8) SRC; 1882 break; 1883 case 16: 1884 DST = (s16) SRC; 1885 break; 1886 case 32: 1887 DST = (s32) SRC; 1888 break; 1889 } 1890 CONT; 1891 ALU64_MOV_K: 1892 DST = IMM; 1893 CONT; 1894 LD_IMM_DW: 1895 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; 1896 insn++; 1897 CONT; 1898 ALU_ARSH_X: 1899 DST = (u64) (u32) (((s32) DST) >> (SRC & 31)); 1900 CONT; 1901 ALU_ARSH_K: 1902 DST = (u64) (u32) (((s32) DST) >> IMM); 1903 CONT; 1904 ALU64_ARSH_X: 1905 (*(s64 *) &DST) >>= (SRC & 63); 1906 CONT; 1907 ALU64_ARSH_K: 1908 (*(s64 *) &DST) >>= IMM; 1909 CONT; 1910 ALU64_MOD_X: 1911 switch (OFF) { 1912 case 0: 1913 div64_u64_rem(DST, SRC, &AX); 1914 DST = AX; 1915 break; 1916 case 1: 1917 AX = div64_s64(DST, SRC); 1918 DST = DST - AX * SRC; 1919 break; 1920 } 1921 CONT; 1922 ALU_MOD_X: 1923 switch (OFF) { 1924 case 0: 1925 AX = (u32) DST; 1926 DST = do_div(AX, (u32) SRC); 1927 break; 1928 case 1: 1929 AX = abs_s32((s32)DST); 1930 AX = do_div(AX, abs_s32((s32)SRC)); 1931 if ((s32)DST < 0) 1932 DST = (u32)-AX; 1933 else 1934 DST = (u32)AX; 1935 break; 1936 } 1937 CONT; 1938 ALU64_MOD_K: 1939 switch (OFF) { 1940 case 0: 1941 div64_u64_rem(DST, IMM, &AX); 1942 DST = AX; 1943 break; 1944 case 1: 1945 AX = div64_s64(DST, IMM); 1946 DST = DST - AX * IMM; 1947 break; 1948 } 1949 CONT; 1950 ALU_MOD_K: 1951 switch (OFF) { 1952 case 0: 1953 AX = (u32) DST; 1954 DST = do_div(AX, (u32) IMM); 1955 break; 1956 case 1: 1957 AX = abs_s32((s32)DST); 1958 AX = do_div(AX, abs_s32((s32)IMM)); 1959 if ((s32)DST < 0) 1960 DST = (u32)-AX; 1961 else 1962 DST = (u32)AX; 1963 break; 1964 } 1965 CONT; 1966 ALU64_DIV_X: 1967 switch (OFF) { 1968 case 0: 1969 DST = div64_u64(DST, SRC); 1970 break; 1971 case 1: 1972 DST = div64_s64(DST, SRC); 1973 break; 1974 } 1975 CONT; 1976 ALU_DIV_X: 1977 switch (OFF) { 1978 case 0: 1979 AX = (u32) DST; 1980 do_div(AX, (u32) SRC); 1981 DST = (u32) AX; 1982 break; 1983 case 1: 1984 AX = abs_s32((s32)DST); 1985 do_div(AX, abs_s32((s32)SRC)); 1986 if (((s32)DST < 0) == ((s32)SRC < 0)) 1987 DST = (u32)AX; 1988 else 1989 DST = (u32)-AX; 1990 break; 1991 } 1992 CONT; 1993 ALU64_DIV_K: 1994 
switch (OFF) { 1995 case 0: 1996 DST = div64_u64(DST, IMM); 1997 break; 1998 case 1: 1999 DST = div64_s64(DST, IMM); 2000 break; 2001 } 2002 CONT; 2003 ALU_DIV_K: 2004 switch (OFF) { 2005 case 0: 2006 AX = (u32) DST; 2007 do_div(AX, (u32) IMM); 2008 DST = (u32) AX; 2009 break; 2010 case 1: 2011 AX = abs_s32((s32)DST); 2012 do_div(AX, abs_s32((s32)IMM)); 2013 if (((s32)DST < 0) == ((s32)IMM < 0)) 2014 DST = (u32)AX; 2015 else 2016 DST = (u32)-AX; 2017 break; 2018 } 2019 CONT; 2020 ALU_END_TO_BE: 2021 switch (IMM) { 2022 case 16: 2023 DST = (__force u16) cpu_to_be16(DST); 2024 break; 2025 case 32: 2026 DST = (__force u32) cpu_to_be32(DST); 2027 break; 2028 case 64: 2029 DST = (__force u64) cpu_to_be64(DST); 2030 break; 2031 } 2032 CONT; 2033 ALU_END_TO_LE: 2034 switch (IMM) { 2035 case 16: 2036 DST = (__force u16) cpu_to_le16(DST); 2037 break; 2038 case 32: 2039 DST = (__force u32) cpu_to_le32(DST); 2040 break; 2041 case 64: 2042 DST = (__force u64) cpu_to_le64(DST); 2043 break; 2044 } 2045 CONT; 2046 ALU64_END_TO_LE: 2047 switch (IMM) { 2048 case 16: 2049 DST = (__force u16) __swab16(DST); 2050 break; 2051 case 32: 2052 DST = (__force u32) __swab32(DST); 2053 break; 2054 case 64: 2055 DST = (__force u64) __swab64(DST); 2056 break; 2057 } 2058 CONT; 2059 2060 /* CALL */ 2061 JMP_CALL: 2062 /* Function call scratches BPF_R1-BPF_R5 registers, 2063 * preserves BPF_R6-BPF_R9, and stores return value 2064 * into BPF_R0. 2065 */ 2066 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, 2067 BPF_R4, BPF_R5); 2068 CONT; 2069 2070 JMP_CALL_ARGS: 2071 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, 2072 BPF_R3, BPF_R4, 2073 BPF_R5, 2074 insn + insn->off + 1); 2075 CONT; 2076 2077 JMP_TAIL_CALL: { 2078 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; 2079 struct bpf_array *array = container_of(map, struct bpf_array, map); 2080 struct bpf_prog *prog; 2081 u32 index = BPF_R3; 2082 2083 if (unlikely(index >= array->map.max_entries)) 2084 goto out; 2085 2086 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT)) 2087 goto out; 2088 2089 prog = READ_ONCE(array->ptrs[index]); 2090 if (!prog) 2091 goto out; 2092 2093 tail_call_cnt++; 2094 2095 /* ARG1 at this point is guaranteed to point to CTX from 2096 * the verifier side due to the fact that the tail call is 2097 * handled like a helper, that is, bpf_tail_call_proto, 2098 * where arg1_type is ARG_PTR_TO_CTX. 
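		 *
		 * BPF-side view (an illustrative sketch, not part of this
		 * file): a program doing
		 *
		 *	bpf_tail_call(ctx, &prog_array, idx);
		 *
		 * arrives here with R1 = ctx, R2 = &prog_array and R3 = idx.
		 * At most MAX_TAIL_CALL_CNT tail calls can be chained, and on
		 * any failure (index out of range, empty slot, limit hit)
		 * execution falls through to the insn after the call.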
2099 */ 2100 insn = prog->insnsi; 2101 goto select_insn; 2102 out: 2103 CONT; 2104 } 2105 JMP_JA: 2106 insn += insn->off; 2107 CONT; 2108 JMP32_JA: 2109 insn += insn->imm; 2110 CONT; 2111 JMP_EXIT: 2112 return BPF_R0; 2113 /* JMP */ 2114 #define COND_JMP(SIGN, OPCODE, CMP_OP) \ 2115 JMP_##OPCODE##_X: \ 2116 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \ 2117 insn += insn->off; \ 2118 CONT_JMP; \ 2119 } \ 2120 CONT; \ 2121 JMP32_##OPCODE##_X: \ 2122 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \ 2123 insn += insn->off; \ 2124 CONT_JMP; \ 2125 } \ 2126 CONT; \ 2127 JMP_##OPCODE##_K: \ 2128 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \ 2129 insn += insn->off; \ 2130 CONT_JMP; \ 2131 } \ 2132 CONT; \ 2133 JMP32_##OPCODE##_K: \ 2134 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \ 2135 insn += insn->off; \ 2136 CONT_JMP; \ 2137 } \ 2138 CONT; 2139 COND_JMP(u, JEQ, ==) 2140 COND_JMP(u, JNE, !=) 2141 COND_JMP(u, JGT, >) 2142 COND_JMP(u, JLT, <) 2143 COND_JMP(u, JGE, >=) 2144 COND_JMP(u, JLE, <=) 2145 COND_JMP(u, JSET, &) 2146 COND_JMP(s, JSGT, >) 2147 COND_JMP(s, JSLT, <) 2148 COND_JMP(s, JSGE, >=) 2149 COND_JMP(s, JSLE, <=) 2150 #undef COND_JMP 2151 /* ST, STX and LDX*/ 2152 ST_NOSPEC: 2153 /* Speculation barrier for mitigating Speculative Store Bypass, 2154 * Bounds-Check Bypass and Type Confusion. In case of arm64, we 2155 * rely on the firmware mitigation as controlled via the ssbd 2156 * kernel parameter. Whenever the mitigation is enabled, it 2157 * works for all of the kernel code with no need to provide any 2158 * additional instructions here. In case of x86, we use 'lfence' 2159 * insn for mitigation. We reuse preexisting logic from Spectre 2160 * v1 mitigation that happens to produce the required code on 2161 * x86 for v4 as well. 2162 */ 2163 barrier_nospec(); 2164 CONT; 2165 #define LDST(SIZEOP, SIZE) \ 2166 STX_MEM_##SIZEOP: \ 2167 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ 2168 CONT; \ 2169 ST_MEM_##SIZEOP: \ 2170 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ 2171 CONT; \ 2172 LDX_MEM_##SIZEOP: \ 2173 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 2174 CONT; \ 2175 LDX_PROBE_MEM_##SIZEOP: \ 2176 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \ 2177 (const void *)(long) (SRC + insn->off)); \ 2178 DST = *((SIZE *)&DST); \ 2179 CONT; 2180 2181 LDST(B, u8) 2182 LDST(H, u16) 2183 LDST(W, u32) 2184 LDST(DW, u64) 2185 #undef LDST 2186 2187 #define LDSX(SIZEOP, SIZE) \ 2188 LDX_MEMSX_##SIZEOP: \ 2189 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 2190 CONT; \ 2191 LDX_PROBE_MEMSX_##SIZEOP: \ 2192 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \ 2193 (const void *)(long) (SRC + insn->off)); \ 2194 DST = *((SIZE *)&DST); \ 2195 CONT; 2196 2197 LDSX(B, s8) 2198 LDSX(H, s16) 2199 LDSX(W, s32) 2200 #undef LDSX 2201 2202 #define ATOMIC_ALU_OP(BOP, KOP) \ 2203 case BOP: \ 2204 if (BPF_SIZE(insn->code) == BPF_W) \ 2205 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \ 2206 (DST + insn->off)); \ 2207 else if (BPF_SIZE(insn->code) == BPF_DW) \ 2208 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \ 2209 (DST + insn->off)); \ 2210 else \ 2211 goto default_label; \ 2212 break; \ 2213 case BOP | BPF_FETCH: \ 2214 if (BPF_SIZE(insn->code) == BPF_W) \ 2215 SRC = (u32) atomic_fetch_##KOP( \ 2216 (u32) SRC, \ 2217 (atomic_t *)(unsigned long) (DST + insn->off)); \ 2218 else if (BPF_SIZE(insn->code) == BPF_DW) \ 2219 SRC = (u64) atomic64_fetch_##KOP( \ 2220 (u64) SRC, \ 2221 (atomic64_t *)(unsigned long) (DST + insn->off)); \ 2222 else \ 2223 goto default_label; \ 2224 
break; 2225 2226 STX_ATOMIC_DW: 2227 STX_ATOMIC_W: 2228 STX_ATOMIC_H: 2229 STX_ATOMIC_B: 2230 switch (IMM) { 2231 /* Atomic read-modify-write instructions support only W and DW 2232 * size modifiers. 2233 */ 2234 ATOMIC_ALU_OP(BPF_ADD, add) 2235 ATOMIC_ALU_OP(BPF_AND, and) 2236 ATOMIC_ALU_OP(BPF_OR, or) 2237 ATOMIC_ALU_OP(BPF_XOR, xor) 2238 #undef ATOMIC_ALU_OP 2239 2240 case BPF_XCHG: 2241 if (BPF_SIZE(insn->code) == BPF_W) 2242 SRC = (u32) atomic_xchg( 2243 (atomic_t *)(unsigned long) (DST + insn->off), 2244 (u32) SRC); 2245 else if (BPF_SIZE(insn->code) == BPF_DW) 2246 SRC = (u64) atomic64_xchg( 2247 (atomic64_t *)(unsigned long) (DST + insn->off), 2248 (u64) SRC); 2249 else 2250 goto default_label; 2251 break; 2252 case BPF_CMPXCHG: 2253 if (BPF_SIZE(insn->code) == BPF_W) 2254 BPF_R0 = (u32) atomic_cmpxchg( 2255 (atomic_t *)(unsigned long) (DST + insn->off), 2256 (u32) BPF_R0, (u32) SRC); 2257 else if (BPF_SIZE(insn->code) == BPF_DW) 2258 BPF_R0 = (u64) atomic64_cmpxchg( 2259 (atomic64_t *)(unsigned long) (DST + insn->off), 2260 (u64) BPF_R0, (u64) SRC); 2261 else 2262 goto default_label; 2263 break; 2264 /* Atomic load and store instructions support all size 2265 * modifiers. 2266 */ 2267 case BPF_LOAD_ACQ: 2268 switch (BPF_SIZE(insn->code)) { 2269 #define LOAD_ACQUIRE(SIZEOP, SIZE) \ 2270 case BPF_##SIZEOP: \ 2271 DST = (SIZE)smp_load_acquire( \ 2272 (SIZE *)(unsigned long)(SRC + insn->off)); \ 2273 break; 2274 LOAD_ACQUIRE(B, u8) 2275 LOAD_ACQUIRE(H, u16) 2276 LOAD_ACQUIRE(W, u32) 2277 #ifdef CONFIG_64BIT 2278 LOAD_ACQUIRE(DW, u64) 2279 #endif 2280 #undef LOAD_ACQUIRE 2281 default: 2282 goto default_label; 2283 } 2284 break; 2285 case BPF_STORE_REL: 2286 switch (BPF_SIZE(insn->code)) { 2287 #define STORE_RELEASE(SIZEOP, SIZE) \ 2288 case BPF_##SIZEOP: \ 2289 smp_store_release( \ 2290 (SIZE *)(unsigned long)(DST + insn->off), (SIZE)SRC); \ 2291 break; 2292 STORE_RELEASE(B, u8) 2293 STORE_RELEASE(H, u16) 2294 STORE_RELEASE(W, u32) 2295 #ifdef CONFIG_64BIT 2296 STORE_RELEASE(DW, u64) 2297 #endif 2298 #undef STORE_RELEASE 2299 default: 2300 goto default_label; 2301 } 2302 break; 2303 2304 default: 2305 goto default_label; 2306 } 2307 CONT; 2308 2309 default_label: 2310 /* If we ever reach this, we have a bug somewhere. Die hard here 2311 * instead of just returning 0; we could be somewhere in a subprog, 2312 * so execution could continue otherwise which we do /not/ want. 2313 * 2314 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable(). 
2315 */ 2316 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n", 2317 insn->code, insn->imm); 2318 BUG_ON(1); 2319 return 0; 2320 } 2321 2322 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size 2323 #define DEFINE_BPF_PROG_RUN(stack_size) \ 2324 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ 2325 { \ 2326 u64 stack[stack_size / sizeof(u64)]; \ 2327 u64 regs[MAX_BPF_EXT_REG] = {}; \ 2328 \ 2329 kmsan_unpoison_memory(stack, sizeof(stack)); \ 2330 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2331 ARG1 = (u64) (unsigned long) ctx; \ 2332 return ___bpf_prog_run(regs, insn); \ 2333 } 2334 2335 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size 2336 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \ 2337 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ 2338 const struct bpf_insn *insn) \ 2339 { \ 2340 u64 stack[stack_size / sizeof(u64)]; \ 2341 u64 regs[MAX_BPF_EXT_REG]; \ 2342 \ 2343 kmsan_unpoison_memory(stack, sizeof(stack)); \ 2344 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2345 BPF_R1 = r1; \ 2346 BPF_R2 = r2; \ 2347 BPF_R3 = r3; \ 2348 BPF_R4 = r4; \ 2349 BPF_R5 = r5; \ 2350 return ___bpf_prog_run(regs, insn); \ 2351 } 2352 2353 #define EVAL1(FN, X) FN(X) 2354 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y) 2355 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y) 2356 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y) 2357 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y) 2358 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y) 2359 2360 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192); 2361 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384); 2362 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512); 2363 2364 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192); 2365 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384); 2366 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512); 2367 2368 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size), 2369 2370 static unsigned int (*interpreters[])(const void *ctx, 2371 const struct bpf_insn *insn) = { 2372 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2373 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2374 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2375 }; 2376 #undef PROG_NAME_LIST 2377 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size), 2378 static __maybe_unused 2379 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, 2380 const struct bpf_insn *insn) = { 2381 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2382 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2383 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2384 }; 2385 #undef PROG_NAME_LIST 2386 2387 #ifdef CONFIG_BPF_SYSCALL 2388 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) 2389 { 2390 stack_depth = max_t(u32, stack_depth, 1); 2391 insn->off = (s16) insn->imm; 2392 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] - 2393 __bpf_call_base_args; 2394 insn->code = BPF_JMP | BPF_CALL_ARGS; 2395 } 2396 #endif 2397 #endif 2398 2399 static unsigned int __bpf_prog_ret0_warn(const void *ctx, 2400 const struct bpf_insn *insn) 2401 { 2402 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON 2403 * is not working properly, so warn about it! 
2404 */ 2405 WARN_ON_ONCE(1); 2406 return 0; 2407 } 2408 2409 static bool __bpf_prog_map_compatible(struct bpf_map *map, 2410 const struct bpf_prog *fp) 2411 { 2412 enum bpf_prog_type prog_type = resolve_prog_type(fp); 2413 struct bpf_prog_aux *aux = fp->aux; 2414 enum bpf_cgroup_storage_type i; 2415 bool ret = false; 2416 u64 cookie; 2417 2418 if (fp->kprobe_override) 2419 return ret; 2420 2421 spin_lock(&map->owner_lock); 2422 /* There's no owner yet where we could check for compatibility. */ 2423 if (!map->owner) { 2424 map->owner = bpf_map_owner_alloc(map); 2425 if (!map->owner) 2426 goto err; 2427 map->owner->type = prog_type; 2428 map->owner->jited = fp->jited; 2429 map->owner->xdp_has_frags = aux->xdp_has_frags; 2430 map->owner->sleepable = fp->sleepable; 2431 map->owner->expected_attach_type = fp->expected_attach_type; 2432 map->owner->attach_func_proto = aux->attach_func_proto; 2433 for_each_cgroup_storage_type(i) { 2434 map->owner->storage_cookie[i] = 2435 aux->cgroup_storage[i] ? 2436 aux->cgroup_storage[i]->cookie : 0; 2437 } 2438 ret = true; 2439 } else { 2440 ret = map->owner->type == prog_type && 2441 map->owner->jited == fp->jited && 2442 map->owner->xdp_has_frags == aux->xdp_has_frags && 2443 map->owner->sleepable == fp->sleepable; 2444 if (ret && 2445 map->map_type == BPF_MAP_TYPE_PROG_ARRAY && 2446 map->owner->expected_attach_type != fp->expected_attach_type) 2447 ret = false; 2448 for_each_cgroup_storage_type(i) { 2449 if (!ret) 2450 break; 2451 cookie = aux->cgroup_storage[i] ? 2452 aux->cgroup_storage[i]->cookie : 0; 2453 ret = map->owner->storage_cookie[i] == cookie || 2454 !cookie; 2455 } 2456 if (ret && 2457 map->owner->attach_func_proto != aux->attach_func_proto) { 2458 switch (prog_type) { 2459 case BPF_PROG_TYPE_TRACING: 2460 case BPF_PROG_TYPE_LSM: 2461 case BPF_PROG_TYPE_EXT: 2462 case BPF_PROG_TYPE_STRUCT_OPS: 2463 ret = false; 2464 break; 2465 default: 2466 break; 2467 } 2468 } 2469 } 2470 err: 2471 spin_unlock(&map->owner_lock); 2472 return ret; 2473 } 2474 2475 bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp) 2476 { 2477 /* XDP programs inserted into maps are not guaranteed to run on 2478 * a particular netdev (and can run outside driver context entirely 2479 * in the case of devmap and cpumap). Until device checks 2480 * are implemented, prohibit adding dev-bound programs to program maps. 2481 */ 2482 if (bpf_prog_is_dev_bound(fp->aux)) 2483 return false; 2484 2485 return __bpf_prog_map_compatible(map, fp); 2486 } 2487 2488 static int bpf_check_tail_call(const struct bpf_prog *fp) 2489 { 2490 struct bpf_prog_aux *aux = fp->aux; 2491 int i, ret = 0; 2492 2493 mutex_lock(&aux->used_maps_mutex); 2494 for (i = 0; i < aux->used_map_cnt; i++) { 2495 struct bpf_map *map = aux->used_maps[i]; 2496 2497 if (!map_type_contains_progs(map)) 2498 continue; 2499 2500 if (!__bpf_prog_map_compatible(map, fp)) { 2501 ret = -EINVAL; 2502 goto out; 2503 } 2504 } 2505 2506 out: 2507 mutex_unlock(&aux->used_maps_mutex); 2508 return ret; 2509 } 2510 2511 static bool bpf_prog_select_interpreter(struct bpf_prog *fp) 2512 { 2513 bool select_interpreter = false; 2514 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 2515 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 2516 u32 idx = (round_up(stack_depth, 32) / 32) - 1; 2517 2518 /* may_goto may cause stack size > 512, leading to idx out-of-bounds. 2519 * But for non-JITed programs, we don't need bpf_func, so no bounds 2520 * check needed. 
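	 *
	 * Worked example of the bucketing (illustrative): stack_depth is
	 * clamped to at least 1, so depths 1..32 give
	 * round_up(depth, 32) / 32 - 1 == 0 and select __bpf_prog_run32(),
	 * depths 33..64 select __bpf_prog_run64(), and a depth of 512
	 * selects __bpf_prog_run512() at idx 15, the last interpreters[]
	 * slot.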
2521 */ 2522 if (idx < ARRAY_SIZE(interpreters)) { 2523 fp->bpf_func = interpreters[idx]; 2524 select_interpreter = true; 2525 } else { 2526 fp->bpf_func = __bpf_prog_ret0_warn; 2527 } 2528 #else 2529 fp->bpf_func = __bpf_prog_ret0_warn; 2530 #endif 2531 return select_interpreter; 2532 } 2533 2534 static struct bpf_prog *bpf_prog_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 2535 { 2536 #ifdef CONFIG_BPF_JIT 2537 struct bpf_prog *orig_prog; 2538 struct bpf_insn_aux_data *orig_insn_aux; 2539 2540 if (!bpf_prog_need_blind(prog)) 2541 return bpf_int_jit_compile(env, prog); 2542 2543 if (env) { 2544 /* 2545 * If env is not NULL, we are called from the end of bpf_check(), at this 2546 * point, only insn_aux_data is used after failure, so it should be restored 2547 * on failure. 2548 */ 2549 orig_insn_aux = bpf_dup_insn_aux_data(env); 2550 if (!orig_insn_aux) 2551 return prog; 2552 } 2553 2554 orig_prog = prog; 2555 prog = bpf_jit_blind_constants(env, prog); 2556 /* 2557 * If blinding was requested and we failed during blinding, we must fall 2558 * back to the interpreter. 2559 */ 2560 if (IS_ERR(prog)) 2561 goto out_restore; 2562 2563 prog = bpf_int_jit_compile(env, prog); 2564 if (prog->jited) { 2565 bpf_jit_prog_release_other(prog, orig_prog); 2566 if (env) 2567 vfree(orig_insn_aux); 2568 return prog; 2569 } 2570 2571 bpf_jit_prog_release_other(orig_prog, prog); 2572 2573 out_restore: 2574 prog = orig_prog; 2575 if (env) 2576 bpf_restore_insn_aux_data(env, orig_insn_aux); 2577 #endif 2578 return prog; 2579 } 2580 2581 struct bpf_prog *__bpf_prog_select_runtime(struct bpf_verifier_env *env, struct bpf_prog *fp, 2582 int *err) 2583 { 2584 /* In case of BPF to BPF calls, verifier did all the prep 2585 * work with regards to JITing, etc. 2586 */ 2587 bool jit_needed = false; 2588 2589 if (fp->bpf_func) 2590 goto finalize; 2591 2592 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) || 2593 bpf_prog_has_kfunc_call(fp)) 2594 jit_needed = true; 2595 2596 if (!bpf_prog_select_interpreter(fp)) 2597 jit_needed = true; 2598 2599 /* eBPF JITs can rewrite the program in case constant 2600 * blinding is active. However, in case of error during 2601 * blinding, bpf_int_jit_compile() must always return a 2602 * valid program, which in this case would simply not 2603 * be JITed, but falls back to the interpreter. 2604 */ 2605 if (!bpf_prog_is_offloaded(fp->aux)) { 2606 *err = bpf_prog_alloc_jited_linfo(fp); 2607 if (*err) 2608 return fp; 2609 2610 fp = bpf_prog_jit_compile(env, fp); 2611 bpf_prog_jit_attempt_done(fp); 2612 if (!fp->jited && jit_needed) { 2613 *err = -ENOTSUPP; 2614 return fp; 2615 } 2616 } else { 2617 *err = bpf_prog_offload_compile(fp); 2618 if (*err) 2619 return fp; 2620 } 2621 2622 finalize: 2623 *err = bpf_prog_lock_ro(fp); 2624 if (*err) 2625 return fp; 2626 2627 /* The tail call compatibility check can only be done at 2628 * this late stage as we need to determine, if we deal 2629 * with JITed or non JITed program concatenations and not 2630 * all eBPF JITs might immediately support all features. 2631 */ 2632 *err = bpf_check_tail_call(fp); 2633 2634 return fp; 2635 } 2636 2637 /** 2638 * bpf_prog_select_runtime - select exec runtime for BPF program 2639 * @fp: bpf_prog populated with BPF program 2640 * @err: pointer to error variable 2641 * 2642 * Try to JIT eBPF program, if JIT is not available, use interpreter. 2643 * The BPF program will be executed via bpf_prog_run() function. 
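 *
 * Typical call pattern (an illustrative sketch of a loader path, with
 * error handling trimmed):
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_prog;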
2644 * 2645 * Return: the &fp argument along with &err set to 0 for success or 2646 * a negative errno code on failure 2647 */ 2648 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 2649 { 2650 return __bpf_prog_select_runtime(NULL, fp, err); 2651 } 2652 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); 2653 2654 static unsigned int __bpf_prog_ret1(const void *ctx, 2655 const struct bpf_insn *insn) 2656 { 2657 return 1; 2658 } 2659 2660 static struct bpf_prog_dummy { 2661 struct bpf_prog prog; 2662 } dummy_bpf_prog = { 2663 .prog = { 2664 .bpf_func = __bpf_prog_ret1, 2665 }, 2666 }; 2667 2668 struct bpf_prog_array bpf_empty_prog_array = { 2669 .items = { 2670 { .prog = NULL }, 2671 }, 2672 }; 2673 EXPORT_SYMBOL(bpf_empty_prog_array); 2674 2675 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) 2676 { 2677 struct bpf_prog_array *p; 2678 2679 if (prog_cnt) 2680 p = kzalloc_flex(*p, items, prog_cnt + 1, flags); 2681 else 2682 p = &bpf_empty_prog_array; 2683 2684 return p; 2685 } 2686 2687 void bpf_prog_array_free(struct bpf_prog_array *progs) 2688 { 2689 if (!progs || progs == &bpf_empty_prog_array) 2690 return; 2691 kfree_rcu(progs, rcu); 2692 } 2693 2694 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu) 2695 { 2696 struct bpf_prog_array *progs; 2697 2698 /* 2699 * RCU Tasks Trace grace period implies RCU grace period, there is no 2700 * need to call kfree_rcu(), just call kfree() directly. 2701 */ 2702 progs = container_of(rcu, struct bpf_prog_array, rcu); 2703 kfree(progs); 2704 } 2705 2706 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs) 2707 { 2708 if (!progs || progs == &bpf_empty_prog_array) 2709 return; 2710 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb); 2711 } 2712 2713 int bpf_prog_array_length(struct bpf_prog_array *array) 2714 { 2715 struct bpf_prog_array_item *item; 2716 u32 cnt = 0; 2717 2718 for (item = array->items; item->prog; item++) 2719 if (item->prog != &dummy_bpf_prog.prog) 2720 cnt++; 2721 return cnt; 2722 } 2723 2724 bool bpf_prog_array_is_empty(struct bpf_prog_array *array) 2725 { 2726 struct bpf_prog_array_item *item; 2727 2728 for (item = array->items; item->prog; item++) 2729 if (item->prog != &dummy_bpf_prog.prog) 2730 return false; 2731 return true; 2732 } 2733 2734 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, 2735 u32 *prog_ids, 2736 u32 request_cnt) 2737 { 2738 struct bpf_prog_array_item *item; 2739 int i = 0; 2740 2741 for (item = array->items; item->prog; item++) { 2742 if (item->prog == &dummy_bpf_prog.prog) 2743 continue; 2744 prog_ids[i] = item->prog->aux->id; 2745 if (++i == request_cnt) { 2746 item++; 2747 break; 2748 } 2749 } 2750 2751 return !!(item->prog); 2752 } 2753 2754 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, 2755 __u32 __user *prog_ids, u32 cnt) 2756 { 2757 unsigned long err = 0; 2758 bool nospc; 2759 u32 *ids; 2760 2761 /* users of this function are doing: 2762 * cnt = bpf_prog_array_length(); 2763 * if (cnt > 0) 2764 * bpf_prog_array_copy_to_user(..., cnt); 2765 * so below kcalloc doesn't need extra cnt > 0 check. 
2766 */ 2767 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); 2768 if (!ids) 2769 return -ENOMEM; 2770 nospc = bpf_prog_array_copy_core(array, ids, cnt); 2771 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); 2772 kfree(ids); 2773 if (err) 2774 return -EFAULT; 2775 if (nospc) 2776 return -ENOSPC; 2777 return 0; 2778 } 2779 2780 void bpf_prog_array_delete_safe(struct bpf_prog_array *array, 2781 struct bpf_prog *old_prog) 2782 { 2783 struct bpf_prog_array_item *item; 2784 2785 for (item = array->items; item->prog; item++) 2786 if (item->prog == old_prog) { 2787 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); 2788 break; 2789 } 2790 } 2791 2792 /** 2793 * bpf_prog_array_delete_safe_at() - Replaces the program at the given 2794 * index into the program array with 2795 * a dummy no-op program. 2796 * @array: a bpf_prog_array 2797 * @index: the index of the program to replace 2798 * 2799 * Skips over dummy programs, by not counting them, when calculating 2800 * the position of the program to replace. 2801 * 2802 * Return: 2803 * * 0 - Success 2804 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2805 * * -ENOENT - Index out of range 2806 */ 2807 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index) 2808 { 2809 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog); 2810 } 2811 2812 /** 2813 * bpf_prog_array_update_at() - Updates the program at the given index 2814 * into the program array. 2815 * @array: a bpf_prog_array 2816 * @index: the index of the program to update 2817 * @prog: the program to insert into the array 2818 * 2819 * Skips over dummy programs, by not counting them, when calculating 2820 * the position of the program to update. 2821 * 2822 * Return: 2823 * * 0 - Success 2824 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2825 * * -ENOENT - Index out of range 2826 */ 2827 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, 2828 struct bpf_prog *prog) 2829 { 2830 struct bpf_prog_array_item *item; 2831 2832 if (unlikely(index < 0)) 2833 return -EINVAL; 2834 2835 for (item = array->items; item->prog; item++) { 2836 if (item->prog == &dummy_bpf_prog.prog) 2837 continue; 2838 if (!index) { 2839 WRITE_ONCE(item->prog, prog); 2840 return 0; 2841 } 2842 index--; 2843 } 2844 return -ENOENT; 2845 } 2846 2847 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 2848 struct bpf_prog *exclude_prog, 2849 struct bpf_prog *include_prog, 2850 u64 bpf_cookie, 2851 struct bpf_prog_array **new_array) 2852 { 2853 int new_prog_cnt, carry_prog_cnt = 0; 2854 struct bpf_prog_array_item *existing, *new; 2855 struct bpf_prog_array *array; 2856 bool found_exclude = false; 2857 2858 /* Figure out how many existing progs we need to carry over to 2859 * the new array. 2860 */ 2861 if (old_array) { 2862 existing = old_array->items; 2863 for (; existing->prog; existing++) { 2864 if (existing->prog == exclude_prog) { 2865 found_exclude = true; 2866 continue; 2867 } 2868 if (existing->prog != &dummy_bpf_prog.prog) 2869 carry_prog_cnt++; 2870 if (existing->prog == include_prog) 2871 return -EEXIST; 2872 } 2873 } 2874 2875 if (exclude_prog && !found_exclude) 2876 return -ENOENT; 2877 2878 /* How many progs (not NULL) will be in the new array? */ 2879 new_prog_cnt = carry_prog_cnt; 2880 if (include_prog) 2881 new_prog_cnt += 1; 2882 2883 /* Do we have any prog (not NULL) in the new array? 
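	 *
	 * Example (illustrative): with old_array == {A, B, dummy, NULL},
	 * exclude_prog == B and include_prog == C, only A is carried over
	 * (dummies are never copied), so the resulting array is
	 * {A, C, NULL}.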
*/ 2884 if (!new_prog_cnt) { 2885 *new_array = NULL; 2886 return 0; 2887 } 2888 2889 /* +1 as the end of prog_array is marked with NULL */ 2890 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL); 2891 if (!array) 2892 return -ENOMEM; 2893 new = array->items; 2894 2895 /* Fill in the new prog array */ 2896 if (carry_prog_cnt) { 2897 existing = old_array->items; 2898 for (; existing->prog; existing++) { 2899 if (existing->prog == exclude_prog || 2900 existing->prog == &dummy_bpf_prog.prog) 2901 continue; 2902 2903 new->prog = existing->prog; 2904 new->bpf_cookie = existing->bpf_cookie; 2905 new++; 2906 } 2907 } 2908 if (include_prog) { 2909 new->prog = include_prog; 2910 new->bpf_cookie = bpf_cookie; 2911 new++; 2912 } 2913 new->prog = NULL; 2914 *new_array = array; 2915 return 0; 2916 } 2917 2918 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 2919 u32 *prog_ids, u32 request_cnt, 2920 u32 *prog_cnt) 2921 { 2922 u32 cnt = 0; 2923 2924 if (array) 2925 cnt = bpf_prog_array_length(array); 2926 2927 *prog_cnt = cnt; 2928 2929 /* return early if user requested only program count or nothing to copy */ 2930 if (!request_cnt || !cnt) 2931 return 0; 2932 2933 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */ 2934 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC 2935 : 0; 2936 } 2937 2938 void __bpf_free_used_maps(struct bpf_prog_aux *aux, 2939 struct bpf_map **used_maps, u32 len) 2940 { 2941 struct bpf_map *map; 2942 bool sleepable; 2943 u32 i; 2944 2945 sleepable = aux->prog->sleepable; 2946 for (i = 0; i < len; i++) { 2947 map = used_maps[i]; 2948 if (map->ops->map_poke_untrack) 2949 map->ops->map_poke_untrack(map, aux); 2950 if (sleepable) 2951 atomic64_dec(&map->sleepable_refcnt); 2952 bpf_map_put(map); 2953 } 2954 } 2955 2956 static void bpf_free_used_maps(struct bpf_prog_aux *aux) 2957 { 2958 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt); 2959 kfree(aux->used_maps); 2960 } 2961 2962 void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len) 2963 { 2964 #ifdef CONFIG_BPF_SYSCALL 2965 struct btf_mod_pair *btf_mod; 2966 u32 i; 2967 2968 for (i = 0; i < len; i++) { 2969 btf_mod = &used_btfs[i]; 2970 if (btf_mod->module) 2971 module_put(btf_mod->module); 2972 btf_put(btf_mod->btf); 2973 } 2974 #endif 2975 } 2976 2977 static void bpf_free_used_btfs(struct bpf_prog_aux *aux) 2978 { 2979 __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt); 2980 kfree(aux->used_btfs); 2981 } 2982 2983 static void bpf_prog_free_deferred(struct work_struct *work) 2984 { 2985 struct bpf_prog_aux *aux; 2986 int i; 2987 2988 aux = container_of(work, struct bpf_prog_aux, work); 2989 #ifdef CONFIG_BPF_SYSCALL 2990 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab); 2991 bpf_prog_stream_free(aux->prog); 2992 #endif 2993 #ifdef CONFIG_CGROUP_BPF 2994 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID) 2995 bpf_cgroup_atype_put(aux->cgroup_atype); 2996 #endif 2997 bpf_free_used_maps(aux); 2998 bpf_free_used_btfs(aux); 2999 bpf_prog_disassoc_struct_ops(aux->prog); 3000 if (bpf_prog_is_dev_bound(aux)) 3001 bpf_prog_dev_bound_destroy(aux->prog); 3002 #ifdef CONFIG_PERF_EVENTS 3003 if (aux->prog->has_callchain_buf) 3004 put_callchain_buffers(); 3005 #endif 3006 if (aux->dst_trampoline) 3007 bpf_trampoline_put(aux->dst_trampoline); 3008 for (i = 0; i < aux->real_func_cnt; i++) { 3009 /* We can just unlink the subprog poke descriptor table as 3010 * it was originally linked to the main program and is also 3011 * released along with it. 
3012 */ 3013 aux->func[i]->aux->poke_tab = NULL; 3014 bpf_jit_free(aux->func[i]); 3015 } 3016 if (aux->real_func_cnt) { 3017 kfree(aux->func); 3018 bpf_prog_unlock_free(aux->prog); 3019 } else { 3020 bpf_jit_free(aux->prog); 3021 } 3022 } 3023 3024 void bpf_prog_free(struct bpf_prog *fp) 3025 { 3026 struct bpf_prog_aux *aux = fp->aux; 3027 3028 if (aux->dst_prog) 3029 bpf_prog_put(aux->dst_prog); 3030 bpf_token_put(aux->token); 3031 INIT_WORK(&aux->work, bpf_prog_free_deferred); 3032 schedule_work(&aux->work); 3033 } 3034 EXPORT_SYMBOL_GPL(bpf_prog_free); 3035 3036 /* RNG for unprivileged user space with separated state from prandom_u32(). */ 3037 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state); 3038 3039 void bpf_user_rnd_init_once(void) 3040 { 3041 prandom_init_once(&bpf_user_rnd_state); 3042 } 3043 3044 BPF_CALL_0(bpf_user_rnd_u32) 3045 { 3046 /* Should someone ever have the rather unwise idea to use some 3047 * of the registers passed into this function, then note that 3048 * this function is called from native eBPF and classic-to-eBPF 3049 * transformations. Register assignments from both sides are 3050 * different, f.e. classic always sets fn(ctx, A, X) here. 3051 */ 3052 struct rnd_state *state; 3053 u32 res; 3054 3055 state = &get_cpu_var(bpf_user_rnd_state); 3056 res = prandom_u32_state(state); 3057 put_cpu_var(bpf_user_rnd_state); 3058 3059 return res; 3060 } 3061 3062 BPF_CALL_0(bpf_get_raw_cpu_id) 3063 { 3064 return raw_smp_processor_id(); 3065 } 3066 3067 /* Weak definitions of helper functions in case we don't have bpf syscall. */ 3068 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; 3069 const struct bpf_func_proto bpf_map_update_elem_proto __weak; 3070 const struct bpf_func_proto bpf_map_delete_elem_proto __weak; 3071 const struct bpf_func_proto bpf_map_push_elem_proto __weak; 3072 const struct bpf_func_proto bpf_map_pop_elem_proto __weak; 3073 const struct bpf_func_proto bpf_map_peek_elem_proto __weak; 3074 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak; 3075 const struct bpf_func_proto bpf_spin_lock_proto __weak; 3076 const struct bpf_func_proto bpf_spin_unlock_proto __weak; 3077 const struct bpf_func_proto bpf_jiffies64_proto __weak; 3078 3079 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; 3080 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; 3081 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak; 3082 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; 3083 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak; 3084 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak; 3085 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak; 3086 3087 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak; 3088 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak; 3089 const struct bpf_func_proto bpf_get_current_comm_proto __weak; 3090 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak; 3091 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak; 3092 const struct bpf_func_proto bpf_get_local_storage_proto __weak; 3093 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak; 3094 const struct bpf_func_proto bpf_snprintf_btf_proto __weak; 3095 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak; 3096 const struct bpf_func_proto bpf_set_retval_proto __weak; 3097 const struct bpf_func_proto bpf_get_retval_proto __weak; 3098 3099 const struct bpf_func_proto * __weak 
bpf_get_trace_printk_proto(void)
{
	return NULL;
}

const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{
	return NULL;
}

const struct bpf_func_proto * __weak bpf_get_perf_event_read_value_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	/* func is unused for tail_call, we set it to pass the
	 * get_helper_proto check
	 */
	.func = BPF_PTR_POISON,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}

/* By default, enable the verifier's mitigations against Spectre v1 and v4 for
 * all archs. The value returned must not change at runtime as there is
 * currently no support for reloading programs that were loaded without
 * mitigations.
 */
bool __weak bpf_jit_bypass_spec_v1(void)
{
	return false;
}

bool __weak bpf_jit_bypass_spec_v4(void)
{
	return false;
}

/* Return true if the JIT inlines the call to the helper corresponding to
 * the imm.
 *
 * The verifier will not patch the insn->imm for the call to the helper if
 * this returns true.
 */
bool __weak bpf_jit_inlines_helper_call(s32 imm)
{
	return false;
}

/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls.
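 *
 * Like the other capability hooks around it, this is a __weak stub that an
 * arch JIT overrides with a strong definition, e.g. (an illustrative sketch
 * of what a backend's bpf_jit_comp.c might provide, not code from this
 * file):
 *
 *	bool bpf_jit_supports_subprog_tailcalls(void)
 *	{
 *		return true;
 *	}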
*/ 3196 bool __weak bpf_jit_supports_subprog_tailcalls(void) 3197 { 3198 return false; 3199 } 3200 3201 bool __weak bpf_jit_supports_percpu_insn(void) 3202 { 3203 return false; 3204 } 3205 3206 bool __weak bpf_jit_supports_kfunc_call(void) 3207 { 3208 return false; 3209 } 3210 3211 bool __weak bpf_jit_supports_far_kfunc_call(void) 3212 { 3213 return false; 3214 } 3215 3216 bool __weak bpf_jit_supports_arena(void) 3217 { 3218 return false; 3219 } 3220 3221 bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) 3222 { 3223 return false; 3224 } 3225 3226 bool __weak bpf_jit_supports_fsession(void) 3227 { 3228 return false; 3229 } 3230 3231 u64 __weak bpf_arch_uaddress_limit(void) 3232 { 3233 #if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) 3234 return TASK_SIZE; 3235 #else 3236 return 0; 3237 #endif 3238 } 3239 3240 /* Return TRUE if the JIT backend satisfies the following two conditions: 3241 * 1) JIT backend supports atomic_xchg() on pointer-sized words. 3242 * 2) Under the specific arch, the implementation of xchg() is the same 3243 * as atomic_xchg() on pointer-sized words. 3244 */ 3245 bool __weak bpf_jit_supports_ptr_xchg(void) 3246 { 3247 return false; 3248 } 3249 3250 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call 3251 * skb_copy_bits(), so provide a weak definition of it for NET-less config. 3252 */ 3253 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, 3254 int len) 3255 { 3256 return -EFAULT; 3257 } 3258 3259 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t, 3260 enum bpf_text_poke_type new_t, void *old_addr, 3261 void *new_addr) 3262 { 3263 return -ENOTSUPP; 3264 } 3265 3266 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len) 3267 { 3268 return ERR_PTR(-ENOTSUPP); 3269 } 3270 3271 int __weak bpf_arch_text_invalidate(void *dst, size_t len) 3272 { 3273 return -ENOTSUPP; 3274 } 3275 3276 bool __weak bpf_jit_supports_exceptions(void) 3277 { 3278 return false; 3279 } 3280 3281 bool __weak bpf_jit_supports_private_stack(void) 3282 { 3283 return false; 3284 } 3285 3286 void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) 3287 { 3288 } 3289 3290 bool __weak bpf_jit_supports_timed_may_goto(void) 3291 { 3292 return false; 3293 } 3294 3295 u64 __weak arch_bpf_timed_may_goto(void) 3296 { 3297 return 0; 3298 } 3299 3300 static noinline void bpf_prog_report_may_goto_violation(void) 3301 { 3302 #ifdef CONFIG_BPF_SYSCALL 3303 struct bpf_stream_stage ss; 3304 struct bpf_prog *prog; 3305 3306 prog = bpf_prog_find_from_stack(); 3307 if (!prog) 3308 return; 3309 bpf_stream_stage(ss, prog, BPF_STDERR, ({ 3310 bpf_stream_printk(ss, "ERROR: Timeout detected for may_goto instruction\n"); 3311 bpf_stream_dump_stack(ss); 3312 })); 3313 #endif 3314 } 3315 3316 u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p) 3317 { 3318 u64 time = ktime_get_mono_fast_ns(); 3319 3320 /* Populate the timestamp for this stack frame, and refresh count. */ 3321 if (!p->timestamp) { 3322 p->timestamp = time; 3323 return BPF_MAX_TIMED_LOOPS; 3324 } 3325 /* Check if we've exhausted our time slice, and zero count. */ 3326 if (unlikely(time - p->timestamp >= (NSEC_PER_SEC / 4))) { 3327 bpf_prog_report_may_goto_violation(); 3328 return 0; 3329 } 3330 /* Refresh the count for the stack frame. 
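	 *
	 * Illustrative summary of the scheme (not part of the original
	 * comment): the first may_goto in a frame stamps p->timestamp and
	 * gets a budget of BPF_MAX_TIMED_LOOPS iterations; each refill
	 * rechecks the clock, and once more than NSEC_PER_SEC / 4 (250 ms)
	 * has passed since that first stamp, the count is forced to zero
	 * and the violation is reported on the program's BPF_STDERR stream.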
*/ 3331 return BPF_MAX_TIMED_LOOPS; 3332 } 3333 3334 /* for configs without MMU or 32-bit */ 3335 __weak const struct bpf_map_ops arena_map_ops; 3336 __weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena) 3337 { 3338 return 0; 3339 } 3340 __weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena) 3341 { 3342 return 0; 3343 } 3344 3345 #ifdef CONFIG_BPF_SYSCALL 3346 static int __init bpf_global_ma_init(void) 3347 { 3348 int ret; 3349 3350 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false); 3351 bpf_global_ma_set = !ret; 3352 return ret; 3353 } 3354 late_initcall(bpf_global_ma_init); 3355 #endif 3356 3357 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key); 3358 EXPORT_SYMBOL(bpf_stats_enabled_key); 3359 3360 /* All definitions of tracepoints related to BPF. */ 3361 #define CREATE_TRACE_POINTS 3362 #include <linux/bpf_trace.h> 3363 3364 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception); 3365 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx); 3366 3367 #ifdef CONFIG_BPF_SYSCALL 3368 3369 void bpf_get_linfo_file_line(struct btf *btf, const struct bpf_line_info *linfo, 3370 const char **filep, const char **linep, int *nump) 3371 { 3372 /* Get base component of the file path. */ 3373 if (filep) { 3374 *filep = btf_name_by_offset(btf, linfo->file_name_off); 3375 *filep = kbasename(*filep); 3376 } 3377 3378 /* Obtain the source line, and strip whitespace in prefix. */ 3379 if (linep) { 3380 *linep = btf_name_by_offset(btf, linfo->line_off); 3381 while (isspace(**linep)) 3382 *linep += 1; 3383 } 3384 3385 if (nump) 3386 *nump = BPF_LINE_INFO_LINE_NUM(linfo->line_col); 3387 } 3388 3389 const struct bpf_line_info *bpf_find_linfo(const struct bpf_prog *prog, u32 insn_off) 3390 { 3391 const struct bpf_line_info *linfo; 3392 u32 nr_linfo; 3393 int l, r, m; 3394 3395 nr_linfo = prog->aux->nr_linfo; 3396 if (!nr_linfo || insn_off >= prog->len) 3397 return NULL; 3398 3399 linfo = prog->aux->linfo; 3400 /* Loop invariant: linfo[l].insn_off <= insns_off. 3401 * linfo[0].insn_off == 0 which always satisfies above condition. 3402 * Binary search is searching for rightmost linfo entry that satisfies 3403 * the above invariant, giving us the desired record that covers given 3404 * instruction offset. 3405 */ 3406 l = 0; 3407 r = nr_linfo - 1; 3408 while (l < r) { 3409 /* (r - l + 1) / 2 means we break a tie to the right, so if: 3410 * l=1, r=2, linfo[l].insn_off <= insn_off, linfo[r].insn_off > insn_off, 3411 * then m=2, we see that linfo[m].insn_off > insn_off, and so 3412 * r becomes 1 and we exit the loop with correct l==1. 3413 * If the tie was broken to the left, m=1 would end us up in 3414 * an endless loop where l and m stay at 1 and r stays at 2. 3415 */ 3416 m = l + (r - l + 1) / 2; 3417 if (linfo[m].insn_off <= insn_off) 3418 l = m; 3419 else 3420 r = m - 1; 3421 } 3422 3423 return &linfo[l]; 3424 } 3425 3426 int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep, 3427 const char **linep, int *nump) 3428 { 3429 int idx = -1, insn_start, insn_end, len; 3430 struct bpf_line_info *linfo; 3431 void **jited_linfo; 3432 struct btf *btf; 3433 int nr_linfo; 3434 3435 btf = prog->aux->btf; 3436 linfo = prog->aux->linfo; 3437 jited_linfo = prog->aux->jited_linfo; 3438 3439 if (!btf || !linfo || !jited_linfo) 3440 return -EINVAL; 3441 len = prog->aux->func ? 
		prog->aux->func[prog->aux->func_idx]->len : prog->len;

	linfo = &prog->aux->linfo[prog->aux->linfo_idx];
	jited_linfo = &prog->aux->jited_linfo[prog->aux->linfo_idx];

	insn_start = linfo[0].insn_off;
	insn_end = insn_start + len;
	nr_linfo = prog->aux->nr_linfo - prog->aux->linfo_idx;

	for (int i = 0; i < nr_linfo &&
	     linfo[i].insn_off >= insn_start && linfo[i].insn_off < insn_end; i++) {
		if (jited_linfo[i] >= (void *)ip)
			break;
		idx = i;
	}

	if (idx == -1)
		return -ENOENT;

	bpf_get_linfo_file_line(btf, &linfo[idx], filep, linep, nump);
	return 0;
}

struct walk_stack_ctx {
	struct bpf_prog *prog;
};

static bool find_from_stack_cb(void *cookie, u64 ip, u64 sp, u64 bp)
{
	struct walk_stack_ctx *ctxp = cookie;
	struct bpf_prog *prog;

	/*
	 * The RCU read lock is held to safely traverse the latch tree, but we
	 * don't need its protection when accessing the prog, since it has an
	 * active stack frame on the current stack trace, and won't disappear.
	 */
	rcu_read_lock();
	prog = bpf_prog_ksym_find(ip);
	rcu_read_unlock();
	if (!prog)
		return true;
	/* Make sure we return the main prog if we found a subprog */
	ctxp->prog = prog->aux->main_prog_aux->prog;
	return false;
}

struct bpf_prog *bpf_prog_find_from_stack(void)
{
	struct walk_stack_ctx ctx = {};

	arch_bpf_stack_walk(find_from_stack_cb, &ctx);
	return ctx.prog;
}

#endif
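
/*
 * Illustrative usage of the line-info helpers above (a sketch, not part of
 * this file): a dump-stack style consumer that wants to resolve a jitted
 * instruction pointer back to its source location would do roughly
 *
 *	const char *file, *line;
 *	int num;
 *
 *	if (!bpf_prog_get_file_line(prog, ip, &file, &line, &num))
 *		pr_info("%s:%d %s\n", file, num, line);
 *
 * where a zero return means file/line/num were filled in from the program's
 * BTF-backed bpf_line_info.
 */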