/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}

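/* Illustrative note (assumption, not part of the original comments): the tag
 * computed below by bpf_prog_calc_tag() is a SHA-1 digest over the
 * instruction image with map fds zeroed out, so that two loads of the same
 * program get the same tag no matter which fd numbers user space passed in.
 * A rough sketch of the padded buffer that is hashed, for a program of
 * psize bytes (SHA_MESSAGE_BYTES == 64):
 *
 *	raw: [ insns (psize) | 0x80 | zero pad | __be64 bit length ]
 *	      <------- rounded up to whole 64-byte SHA blocks ------>
 *
 * The leading bytes of the digest land in fp->tag and later show up in the
 * bpf_prog_<tag>[_<name>] kallsyms name built by bpf_get_prog_name().
 */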
int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 > pos)
		imm += delta;
	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 > pos)
		off += delta;
	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
			    const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i += delta + 1;
			insn++;
		}
		code = insn->code;
		if (BPF_CLASS(code) != BPF_JMP ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
						   probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
						   probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

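/* Illustrative example (assumption, not taken from the original file): a
 * typical caller replaces one instruction at offset off with a short
 * sequence, e.g.
 *
 *	const struct bpf_insn patch[] = {
 *		BPF_MOV64_IMM(BPF_REG_2, 42),
 *		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
 *	};
 *	new_prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *
 * On success the result has prog->len + (len - 1) instructions, and all jump
 * offsets and pseudo-call immediates that cross the patched region have been
 * adjusted by bpf_adj_branches() above.
 */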
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    bpf_adj_branches(prog, off, insn_delta, true))
		return NULL;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed to reverse to the original state. An
	 * overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));

	return prog_adj;
}

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)

/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

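/* Illustrative note (assumption, values are hypothetical): the symbol built
 * below has the form "bpf_prog_<tag>" or "bpf_prog_<tag>_<name>". With a tag
 * of 8937c9e7cbdf72b4 and aux->name "my_prog", the JITed program would show
 * up in kallsyms roughly as:
 *
 *	ffffffffc02e8000 t bpf_prog_8937c9e7cbdf72b4_my_prog
 *
 * The BUILD_BUG_ON() in bpf_get_prog_name() merely checks that the longest
 * possible such string still fits into KSYM_NAME_LEN.
 */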
static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->btf) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

static atomic_long_t bpf_jit_current;

#if defined(MODULES_VADDR)
static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
					    PAGE_SIZE), INT_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
#endif

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

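/* Illustrative sketch (assumption, modelled on how arch JITs typically pair
 * the two helpers below; jit_fill_hole() and orig_prog are placeholder
 * names):
 *
 *	struct bpf_binary_header *hdr;
 *	u8 *image;
 *
 *	hdr = bpf_jit_binary_alloc(proglen, &image, 4, jit_fill_hole);
 *	if (!hdr)
 *		return orig_prog;
 *	... emit instructions into image ...
 *	if (the JIT pass failed)
 *		bpf_jit_binary_free(hdr);
 *
 * The header at the start of the allocation is what bpf_jit_binary_hdr()
 * later recovers from prog->bpf_func in order to free or unprotect the
 * image again.
 */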
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

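/* Illustrative example (derived from the cases handled below, values made
 * up): constant blinding keeps attacker-chosen immediates from appearing
 * verbatim in the JITed image. With a per-instruction random value rnd,
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x12345678)
 *
 * is rewritten by bpf_jit_blind_insn() into
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ 0x12345678)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * using the JIT-internal auxiliary register BPF_REG_AX.
 */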
static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)	\
	/* 32 bit ALU operations. */	\
	/*   Register based. */		\
	INSN_3(ALU, ADD, X),		\
	INSN_3(ALU, SUB, X),		\
	INSN_3(ALU, AND, X),		\
	INSN_3(ALU, OR, X),		\
	INSN_3(ALU, LSH, X),		\
	INSN_3(ALU, RSH, X),		\
	INSN_3(ALU, XOR, X),		\
	INSN_3(ALU, MUL, X),		\
	INSN_3(ALU, MOV, X),		\
	INSN_3(ALU, DIV, X),		\
	INSN_3(ALU, MOD, X),		\
	INSN_2(ALU, NEG),		\
	INSN_3(ALU, END, TO_BE),	\
	INSN_3(ALU, END, TO_LE),	\
	/*   Immediate based. */	\
	INSN_3(ALU, ADD, K),		\
	INSN_3(ALU, SUB, K),		\
	INSN_3(ALU, AND, K),		\
	INSN_3(ALU, OR, K),		\
	INSN_3(ALU, LSH, K),		\
	INSN_3(ALU, RSH, K),		\
	INSN_3(ALU, XOR, K),		\
	INSN_3(ALU, MUL, K),		\
	INSN_3(ALU, MOV, K),		\
	INSN_3(ALU, DIV, K),		\
	INSN_3(ALU, MOD, K),		\
	/* 64 bit ALU operations. */	\
	/*   Register based. */		\
	INSN_3(ALU64, ADD, X),		\
	INSN_3(ALU64, SUB, X),		\
	INSN_3(ALU64, AND, X),		\
	INSN_3(ALU64, OR, X),		\
	INSN_3(ALU64, LSH, X),		\
	INSN_3(ALU64, RSH, X),		\
	INSN_3(ALU64, XOR, X),		\
	INSN_3(ALU64, MUL, X),		\
	INSN_3(ALU64, MOV, X),		\
	INSN_3(ALU64, ARSH, X),		\
	INSN_3(ALU64, DIV, X),		\
	INSN_3(ALU64, MOD, X),		\
	INSN_2(ALU64, NEG),		\
	/*   Immediate based. */	\
	INSN_3(ALU64, ADD, K),		\
	INSN_3(ALU64, SUB, K),		\
	INSN_3(ALU64, AND, K),		\
	INSN_3(ALU64, OR, K),		\
	INSN_3(ALU64, LSH, K),		\
	INSN_3(ALU64, RSH, K),		\
	INSN_3(ALU64, XOR, K),		\
	INSN_3(ALU64, MUL, K),		\
	INSN_3(ALU64, MOV, K),		\
	INSN_3(ALU64, ARSH, K),		\
	INSN_3(ALU64, DIV, K),		\
	INSN_3(ALU64, MOD, K),		\
	/* Call instruction. */		\
	INSN_2(JMP, CALL),		\
	/* Exit instruction. */		\
	INSN_2(JMP, EXIT),		\
	/* Jump instructions. */	\
	/*   Register based. */		\
	INSN_3(JMP, JEQ, X),		\
	INSN_3(JMP, JNE, X),		\
	INSN_3(JMP, JGT, X),		\
	INSN_3(JMP, JLT, X),		\
	INSN_3(JMP, JGE, X),		\
	INSN_3(JMP, JLE, X),		\
	INSN_3(JMP, JSGT, X),		\
	INSN_3(JMP, JSLT, X),		\
	INSN_3(JMP, JSGE, X),		\
	INSN_3(JMP, JSLE, X),		\
	INSN_3(JMP, JSET, X),		\
	/*   Immediate based. */	\
	INSN_3(JMP, JEQ, K),		\
	INSN_3(JMP, JNE, K),		\
	INSN_3(JMP, JGT, K),		\
	INSN_3(JMP, JLT, K),		\
	INSN_3(JMP, JGE, K),		\
	INSN_3(JMP, JLE, K),		\
	INSN_3(JMP, JSGT, K),		\
	INSN_3(JMP, JSLT, K),		\
	INSN_3(JMP, JSGE, K),		\
	INSN_3(JMP, JSLE, K),		\
	INSN_3(JMP, JSET, K),		\
	INSN_2(JMP, JA),		\
	/* Store instructions. */	\
	/*   Register based. */		\
	INSN_3(STX, MEM, B),		\
	INSN_3(STX, MEM, H),		\
	INSN_3(STX, MEM, W),		\
	INSN_3(STX, MEM, DW),		\
	INSN_3(STX, XADD, W),		\
	INSN_3(STX, XADD, DW),		\
	/*   Immediate based. */	\
	INSN_3(ST, MEM, B),		\
	INSN_3(ST, MEM, H),		\
	INSN_3(ST, MEM, W),		\
	INSN_3(ST, MEM, DW),		\
	/* Load instructions. */	\
	/*   Register based. */		\
	INSN_3(LDX, MEM, B),		\
	INSN_3(LDX, MEM, H),		\
	INSN_3(LDX, MEM, W),		\
	INSN_3(LDX, MEM, DW),		\
	/*   Immediate based. */	\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

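/* Illustrative note (assumption): the interpreter below dispatches via a
 * "computed goto" jump table rather than a switch statement. Every opcode
 * indexes jumptable[] with a label address and CONT jumps straight to the
 * handler of the next instruction, conceptually:
 *
 *	select_insn:
 *		goto *jumptable[insn->code];
 *	ALU64_ADD_X:
 *		DST = DST + SRC;
 *		insn++;
 *		goto *jumptable[insn->code];
 *
 * Unknown opcodes fall through to default_label; the verifier only accepts
 * opcodes listed in bpf_opcode_in_insntable() above.
 */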
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
	u64 tmp;
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD, +)
	ALU(SUB, -)
	ALU(AND, &)
	ALU(OR, |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR, ^)
	ALU(MUL, *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_X:
		if (DST < SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_K:
		if (DST < IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_X:
		if (DST <= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_K:
		if (DST <= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_X:
		if (((s64) DST) < ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_K:
		if (((s64) DST) < ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_X:
		if (((s64) DST) <= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_K:
		if (((s64) DST) <= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,  u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

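/* Illustrative note (not part of the original comments): the EVAL*() lines
 * below stamp out one interpreter entry point per supported stack size,
 * 32..512 bytes in steps of 32. bpf_prog_select_func() later binds a program
 * to the matching variant, e.g. a verifier-computed stack_depth of 120 bytes
 * rounds up to 128 and selects interpreters[128 / 32 - 1], i.e.
 * __bpf_prog_run128().
 */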
EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	if (fp->bpf_func)
		goto finalize;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		fp = bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
		if (!fp->jited) {
			*err = -ENOTSUPP;
			return fp;
		}
#endif
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine, if we deal
	 * with JITed or non JITed program concatenations and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* To avoid allocating empty bpf_prog_array for cgroups that don't have
 * bpf program attached, use one global 'empty_prog_array'. It will not be
 * modified by the caller of bpf_prog_array_alloc() (since the caller
 * requested prog_cnt == 0), and that pointer should still be 'freed' with
 * bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
{
	if (!progs ||
	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	rcu_read_lock();
	item = rcu_dereference(array)->items;
	for (; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	rcu_read_unlock();
	return cnt;
}

static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	item = rcu_dereference_check(array, 1)->items;
	for (; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *     bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check, but
	 * bpf_prog_array_length() releases rcu lock and
	 * prog array could have been swapped with empty or larger array,
	 * so always copy 'cnt' prog_ids to the user.
	 * In a rare race the user will see zero prog_ids
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	rcu_read_lock();
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	rcu_read_unlock();
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item = array->items;

	for (; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								      : 0;
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);