1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux Socket Filter - Kernel level socket filtering 4 * 5 * Based on the design of the Berkeley Packet Filter. The new 6 * internal format has been designed by PLUMgrid: 7 * 8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com 9 * 10 * Authors: 11 * 12 * Jay Schulist <jschlst@samba.org> 13 * Alexei Starovoitov <ast@plumgrid.com> 14 * Daniel Borkmann <dborkman@redhat.com> 15 * 16 * Andi Kleen - Fix a few bad bugs and races. 17 * Kris Katterjohn - Added many additional checks in bpf_check_classic() 18 */ 19 20 #include <uapi/linux/btf.h> 21 #include <linux/filter.h> 22 #include <linux/skbuff.h> 23 #include <linux/vmalloc.h> 24 #include <linux/random.h> 25 #include <linux/moduleloader.h> 26 #include <linux/bpf.h> 27 #include <linux/btf.h> 28 #include <linux/objtool.h> 29 #include <linux/rbtree_latch.h> 30 #include <linux/kallsyms.h> 31 #include <linux/rcupdate.h> 32 #include <linux/perf_event.h> 33 #include <linux/extable.h> 34 #include <linux/log2.h> 35 #include <asm/unaligned.h> 36 37 /* Registers */ 38 #define BPF_R0 regs[BPF_REG_0] 39 #define BPF_R1 regs[BPF_REG_1] 40 #define BPF_R2 regs[BPF_REG_2] 41 #define BPF_R3 regs[BPF_REG_3] 42 #define BPF_R4 regs[BPF_REG_4] 43 #define BPF_R5 regs[BPF_REG_5] 44 #define BPF_R6 regs[BPF_REG_6] 45 #define BPF_R7 regs[BPF_REG_7] 46 #define BPF_R8 regs[BPF_REG_8] 47 #define BPF_R9 regs[BPF_REG_9] 48 #define BPF_R10 regs[BPF_REG_10] 49 50 /* Named registers */ 51 #define DST regs[insn->dst_reg] 52 #define SRC regs[insn->src_reg] 53 #define FP regs[BPF_REG_FP] 54 #define AX regs[BPF_REG_AX] 55 #define ARG1 regs[BPF_REG_ARG1] 56 #define CTX regs[BPF_REG_CTX] 57 #define IMM insn->imm 58 59 /* No hurry in this branch 60 * 61 * Exported for the bpf jit load helper. 
62 */ 63 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size) 64 { 65 u8 *ptr = NULL; 66 67 if (k >= SKF_NET_OFF) 68 ptr = skb_network_header(skb) + k - SKF_NET_OFF; 69 else if (k >= SKF_LL_OFF) 70 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 71 72 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) 73 return ptr; 74 75 return NULL; 76 } 77 78 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags) 79 { 80 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags; 81 struct bpf_prog_aux *aux; 82 struct bpf_prog *fp; 83 84 size = round_up(size, PAGE_SIZE); 85 fp = __vmalloc(size, gfp_flags); 86 if (fp == NULL) 87 return NULL; 88 89 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags); 90 if (aux == NULL) { 91 vfree(fp); 92 return NULL; 93 } 94 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags); 95 if (!fp->active) { 96 vfree(fp); 97 kfree(aux); 98 return NULL; 99 } 100 101 fp->pages = size / PAGE_SIZE; 102 fp->aux = aux; 103 fp->aux->prog = fp; 104 fp->jit_requested = ebpf_jit_enabled(); 105 106 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); 107 mutex_init(&fp->aux->used_maps_mutex); 108 mutex_init(&fp->aux->dst_mutex); 109 110 return fp; 111 } 112 113 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) 114 { 115 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags; 116 struct bpf_prog *prog; 117 int cpu; 118 119 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags); 120 if (!prog) 121 return NULL; 122 123 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags); 124 if (!prog->stats) { 125 free_percpu(prog->active); 126 kfree(prog->aux); 127 vfree(prog); 128 return NULL; 129 } 130 131 for_each_possible_cpu(cpu) { 132 struct bpf_prog_stats *pstats; 133 134 pstats = per_cpu_ptr(prog->stats, cpu); 135 u64_stats_init(&pstats->syncp); 136 } 137 return prog; 138 } 139 EXPORT_SYMBOL_GPL(bpf_prog_alloc); 140 141 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog) 142 { 143 if (!prog->aux->nr_linfo || !prog->jit_requested) 144 return 0; 145 146 prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo, 147 sizeof(*prog->aux->jited_linfo), 148 GFP_KERNEL_ACCOUNT | __GFP_NOWARN); 149 if (!prog->aux->jited_linfo) 150 return -ENOMEM; 151 152 return 0; 153 } 154 155 void bpf_prog_free_jited_linfo(struct bpf_prog *prog) 156 { 157 kfree(prog->aux->jited_linfo); 158 prog->aux->jited_linfo = NULL; 159 } 160 161 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog) 162 { 163 if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0]) 164 bpf_prog_free_jited_linfo(prog); 165 } 166 167 /* The jit engine is responsible to provide an array 168 * for insn_off to the jited_off mapping (insn_to_jit_off). 169 * 170 * The idx to this array is the insn_off. Hence, the insn_off 171 * here is relative to the prog itself instead of the main prog. 172 * This array has one entry for each xlated bpf insn. 173 * 174 * jited_off is the byte off to the last byte of the jited insn. 175 * 176 * Hence, with 177 * insn_start: 178 * The first bpf insn off of the prog. The insn off 179 * here is relative to the main prog. 180 * e.g. 
if prog is a subprog, insn_start > 0 181 * linfo_idx: 182 * The prog's idx to prog->aux->linfo and jited_linfo 183 * 184 * jited_linfo[linfo_idx] = prog->bpf_func 185 * 186 * For i > linfo_idx, 187 * 188 * jited_linfo[i] = prog->bpf_func + 189 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1] 190 */ 191 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, 192 const u32 *insn_to_jit_off) 193 { 194 u32 linfo_idx, insn_start, insn_end, nr_linfo, i; 195 const struct bpf_line_info *linfo; 196 void **jited_linfo; 197 198 if (!prog->aux->jited_linfo) 199 /* Userspace did not provide linfo */ 200 return; 201 202 linfo_idx = prog->aux->linfo_idx; 203 linfo = &prog->aux->linfo[linfo_idx]; 204 insn_start = linfo[0].insn_off; 205 insn_end = insn_start + prog->len; 206 207 jited_linfo = &prog->aux->jited_linfo[linfo_idx]; 208 jited_linfo[0] = prog->bpf_func; 209 210 nr_linfo = prog->aux->nr_linfo - linfo_idx; 211 212 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++) 213 /* The verifier ensures that linfo[i].insn_off is 214 * strictly increasing 215 */ 216 jited_linfo[i] = prog->bpf_func + 217 insn_to_jit_off[linfo[i].insn_off - insn_start - 1]; 218 } 219 220 void bpf_prog_free_linfo(struct bpf_prog *prog) 221 { 222 bpf_prog_free_jited_linfo(prog); 223 kvfree(prog->aux->linfo); 224 } 225 226 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, 227 gfp_t gfp_extra_flags) 228 { 229 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags; 230 struct bpf_prog *fp; 231 u32 pages; 232 233 size = round_up(size, PAGE_SIZE); 234 pages = size / PAGE_SIZE; 235 if (pages <= fp_old->pages) 236 return fp_old; 237 238 fp = __vmalloc(size, gfp_flags); 239 if (fp) { 240 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); 241 fp->pages = pages; 242 fp->aux->prog = fp; 243 244 /* We keep fp->aux from fp_old around in the new 245 * reallocated structure. 246 */ 247 fp_old->aux = NULL; 248 fp_old->stats = NULL; 249 fp_old->active = NULL; 250 __bpf_prog_free(fp_old); 251 } 252 253 return fp; 254 } 255 256 void __bpf_prog_free(struct bpf_prog *fp) 257 { 258 if (fp->aux) { 259 mutex_destroy(&fp->aux->used_maps_mutex); 260 mutex_destroy(&fp->aux->dst_mutex); 261 kfree(fp->aux->poke_tab); 262 kfree(fp->aux); 263 } 264 free_percpu(fp->stats); 265 free_percpu(fp->active); 266 vfree(fp); 267 } 268 269 int bpf_prog_calc_tag(struct bpf_prog *fp) 270 { 271 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64); 272 u32 raw_size = bpf_prog_tag_scratch_size(fp); 273 u32 digest[SHA1_DIGEST_WORDS]; 274 u32 ws[SHA1_WORKSPACE_WORDS]; 275 u32 i, bsize, psize, blocks; 276 struct bpf_insn *dst; 277 bool was_ld_map; 278 u8 *raw, *todo; 279 __be32 *result; 280 __be64 *bits; 281 282 raw = vmalloc(raw_size); 283 if (!raw) 284 return -ENOMEM; 285 286 sha1_init(digest); 287 memset(ws, 0, sizeof(ws)); 288 289 /* We need to take out the map fd for the digest calculation 290 * since they are unstable from user space side. 
291 */ 292 dst = (void *)raw; 293 for (i = 0, was_ld_map = false; i < fp->len; i++) { 294 dst[i] = fp->insnsi[i]; 295 if (!was_ld_map && 296 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && 297 (dst[i].src_reg == BPF_PSEUDO_MAP_FD || 298 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) { 299 was_ld_map = true; 300 dst[i].imm = 0; 301 } else if (was_ld_map && 302 dst[i].code == 0 && 303 dst[i].dst_reg == 0 && 304 dst[i].src_reg == 0 && 305 dst[i].off == 0) { 306 was_ld_map = false; 307 dst[i].imm = 0; 308 } else { 309 was_ld_map = false; 310 } 311 } 312 313 psize = bpf_prog_insn_size(fp); 314 memset(&raw[psize], 0, raw_size - psize); 315 raw[psize++] = 0x80; 316 317 bsize = round_up(psize, SHA1_BLOCK_SIZE); 318 blocks = bsize / SHA1_BLOCK_SIZE; 319 todo = raw; 320 if (bsize - psize >= sizeof(__be64)) { 321 bits = (__be64 *)(todo + bsize - sizeof(__be64)); 322 } else { 323 bits = (__be64 *)(todo + bsize + bits_offset); 324 blocks++; 325 } 326 *bits = cpu_to_be64((psize - 1) << 3); 327 328 while (blocks--) { 329 sha1_transform(digest, todo, ws); 330 todo += SHA1_BLOCK_SIZE; 331 } 332 333 result = (__force __be32 *)digest; 334 for (i = 0; i < SHA1_DIGEST_WORDS; i++) 335 result[i] = cpu_to_be32(digest[i]); 336 memcpy(fp->tag, result, sizeof(fp->tag)); 337 338 vfree(raw); 339 return 0; 340 } 341 342 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, 343 s32 end_new, s32 curr, const bool probe_pass) 344 { 345 const s64 imm_min = S32_MIN, imm_max = S32_MAX; 346 s32 delta = end_new - end_old; 347 s64 imm = insn->imm; 348 349 if (curr < pos && curr + imm + 1 >= end_old) 350 imm += delta; 351 else if (curr >= end_new && curr + imm + 1 < end_new) 352 imm -= delta; 353 if (imm < imm_min || imm > imm_max) 354 return -ERANGE; 355 if (!probe_pass) 356 insn->imm = imm; 357 return 0; 358 } 359 360 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, 361 s32 end_new, s32 curr, const bool probe_pass) 362 { 363 const s32 off_min = S16_MIN, off_max = S16_MAX; 364 s32 delta = end_new - end_old; 365 s32 off = insn->off; 366 367 if (curr < pos && curr + off + 1 >= end_old) 368 off += delta; 369 else if (curr >= end_new && curr + off + 1 < end_new) 370 off -= delta; 371 if (off < off_min || off > off_max) 372 return -ERANGE; 373 if (!probe_pass) 374 insn->off = off; 375 return 0; 376 } 377 378 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, 379 s32 end_new, const bool probe_pass) 380 { 381 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0); 382 struct bpf_insn *insn = prog->insnsi; 383 int ret = 0; 384 385 for (i = 0; i < insn_cnt; i++, insn++) { 386 u8 code; 387 388 /* In the probing pass we still operate on the original, 389 * unpatched image in order to check overflows before we 390 * do any other adjustments. Therefore skip the patchlet. 391 */ 392 if (probe_pass && i == pos) { 393 i = end_new; 394 insn = prog->insnsi + end_old; 395 } 396 code = insn->code; 397 if ((BPF_CLASS(code) != BPF_JMP && 398 BPF_CLASS(code) != BPF_JMP32) || 399 BPF_OP(code) == BPF_EXIT) 400 continue; 401 /* Adjust offset of jmps if we cross patch boundaries. 
*/ 402 if (BPF_OP(code) == BPF_CALL) { 403 if (insn->src_reg != BPF_PSEUDO_CALL) 404 continue; 405 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 406 end_new, i, probe_pass); 407 } else { 408 ret = bpf_adj_delta_to_off(insn, pos, end_old, 409 end_new, i, probe_pass); 410 } 411 if (ret) 412 break; 413 } 414 415 return ret; 416 } 417 418 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta) 419 { 420 struct bpf_line_info *linfo; 421 u32 i, nr_linfo; 422 423 nr_linfo = prog->aux->nr_linfo; 424 if (!nr_linfo || !delta) 425 return; 426 427 linfo = prog->aux->linfo; 428 429 for (i = 0; i < nr_linfo; i++) 430 if (off < linfo[i].insn_off) 431 break; 432 433 /* Push all off < linfo[i].insn_off by delta */ 434 for (; i < nr_linfo; i++) 435 linfo[i].insn_off += delta; 436 } 437 438 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 439 const struct bpf_insn *patch, u32 len) 440 { 441 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; 442 const u32 cnt_max = S16_MAX; 443 struct bpf_prog *prog_adj; 444 int err; 445 446 /* Since our patchlet doesn't expand the image, we're done. */ 447 if (insn_delta == 0) { 448 memcpy(prog->insnsi + off, patch, sizeof(*patch)); 449 return prog; 450 } 451 452 insn_adj_cnt = prog->len + insn_delta; 453 454 /* Reject anything that would potentially let the insn->off 455 * target overflow when we have excessive program expansions. 456 * We need to probe here before we do any reallocation where 457 * we afterwards may not fail anymore. 458 */ 459 if (insn_adj_cnt > cnt_max && 460 (err = bpf_adj_branches(prog, off, off + 1, off + len, true))) 461 return ERR_PTR(err); 462 463 /* Several new instructions need to be inserted. Make room 464 * for them. Likely, there's no need for a new allocation as 465 * last page could have large enough tailroom. 466 */ 467 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), 468 GFP_USER); 469 if (!prog_adj) 470 return ERR_PTR(-ENOMEM); 471 472 prog_adj->len = insn_adj_cnt; 473 474 /* Patching happens in 3 steps: 475 * 476 * 1) Move over tail of insnsi from next instruction onwards, 477 * so we can patch the single target insn with one or more 478 * new ones (patching is always from 1 to n insns, n > 0). 479 * 2) Inject new instructions at the target location. 480 * 3) Adjust branch offsets if necessary. 481 */ 482 insn_rest = insn_adj_cnt - off - len; 483 484 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, 485 sizeof(*patch) * insn_rest); 486 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); 487 488 /* We are guaranteed to not fail at this point, otherwise 489 * the ship has sailed to reverse to the original state. An 490 * overflow cannot happen at this point. 
491 */ 492 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false)); 493 494 bpf_adj_linfo(prog_adj, off, insn_delta); 495 496 return prog_adj; 497 } 498 499 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) 500 { 501 /* Branch offsets can't overflow when program is shrinking, no need 502 * to call bpf_adj_branches(..., true) here 503 */ 504 memmove(prog->insnsi + off, prog->insnsi + off + cnt, 505 sizeof(struct bpf_insn) * (prog->len - off - cnt)); 506 prog->len -= cnt; 507 508 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false)); 509 } 510 511 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) 512 { 513 int i; 514 515 for (i = 0; i < fp->aux->func_cnt; i++) 516 bpf_prog_kallsyms_del(fp->aux->func[i]); 517 } 518 519 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) 520 { 521 bpf_prog_kallsyms_del_subprogs(fp); 522 bpf_prog_kallsyms_del(fp); 523 } 524 525 #ifdef CONFIG_BPF_JIT 526 /* All BPF JIT sysctl knobs here. */ 527 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 528 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 529 int bpf_jit_harden __read_mostly; 530 long bpf_jit_limit __read_mostly; 531 532 static void 533 bpf_prog_ksym_set_addr(struct bpf_prog *prog) 534 { 535 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog); 536 unsigned long addr = (unsigned long)hdr; 537 538 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); 539 540 prog->aux->ksym.start = (unsigned long) prog->bpf_func; 541 prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE; 542 } 543 544 static void 545 bpf_prog_ksym_set_name(struct bpf_prog *prog) 546 { 547 char *sym = prog->aux->ksym.name; 548 const char *end = sym + KSYM_NAME_LEN; 549 const struct btf_type *type; 550 const char *func_name; 551 552 BUILD_BUG_ON(sizeof("bpf_prog_") + 553 sizeof(prog->tag) * 2 + 554 /* name has been null terminated. 555 * We should need +1 for the '_' preceding 556 * the name. However, the null character 557 * is double counted between the name and the 558 * sizeof("bpf_prog_") above, so we omit 559 * the +1 here. 
560 */ 561 sizeof(prog->aux->name) > KSYM_NAME_LEN); 562 563 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); 564 sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); 565 566 /* prog->aux->name will be ignored if full btf name is available */ 567 if (prog->aux->func_info_cnt) { 568 type = btf_type_by_id(prog->aux->btf, 569 prog->aux->func_info[prog->aux->func_idx].type_id); 570 func_name = btf_name_by_offset(prog->aux->btf, type->name_off); 571 snprintf(sym, (size_t)(end - sym), "_%s", func_name); 572 return; 573 } 574 575 if (prog->aux->name[0]) 576 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); 577 else 578 *sym = 0; 579 } 580 581 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n) 582 { 583 return container_of(n, struct bpf_ksym, tnode)->start; 584 } 585 586 static __always_inline bool bpf_tree_less(struct latch_tree_node *a, 587 struct latch_tree_node *b) 588 { 589 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b); 590 } 591 592 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) 593 { 594 unsigned long val = (unsigned long)key; 595 const struct bpf_ksym *ksym; 596 597 ksym = container_of(n, struct bpf_ksym, tnode); 598 599 if (val < ksym->start) 600 return -1; 601 if (val >= ksym->end) 602 return 1; 603 604 return 0; 605 } 606 607 static const struct latch_tree_ops bpf_tree_ops = { 608 .less = bpf_tree_less, 609 .comp = bpf_tree_comp, 610 }; 611 612 static DEFINE_SPINLOCK(bpf_lock); 613 static LIST_HEAD(bpf_kallsyms); 614 static struct latch_tree_root bpf_tree __cacheline_aligned; 615 616 void bpf_ksym_add(struct bpf_ksym *ksym) 617 { 618 spin_lock_bh(&bpf_lock); 619 WARN_ON_ONCE(!list_empty(&ksym->lnode)); 620 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms); 621 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 622 spin_unlock_bh(&bpf_lock); 623 } 624 625 static void __bpf_ksym_del(struct bpf_ksym *ksym) 626 { 627 if (list_empty(&ksym->lnode)) 628 return; 629 630 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 631 list_del_rcu(&ksym->lnode); 632 } 633 634 void bpf_ksym_del(struct bpf_ksym *ksym) 635 { 636 spin_lock_bh(&bpf_lock); 637 __bpf_ksym_del(ksym); 638 spin_unlock_bh(&bpf_lock); 639 } 640 641 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) 642 { 643 return fp->jited && !bpf_prog_was_classic(fp); 644 } 645 646 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) 647 { 648 return list_empty(&fp->aux->ksym.lnode) || 649 fp->aux->ksym.lnode.prev == LIST_POISON2; 650 } 651 652 void bpf_prog_kallsyms_add(struct bpf_prog *fp) 653 { 654 if (!bpf_prog_kallsyms_candidate(fp) || 655 !bpf_capable()) 656 return; 657 658 bpf_prog_ksym_set_addr(fp); 659 bpf_prog_ksym_set_name(fp); 660 fp->aux->ksym.prog = true; 661 662 bpf_ksym_add(&fp->aux->ksym); 663 } 664 665 void bpf_prog_kallsyms_del(struct bpf_prog *fp) 666 { 667 if (!bpf_prog_kallsyms_candidate(fp)) 668 return; 669 670 bpf_ksym_del(&fp->aux->ksym); 671 } 672 673 static struct bpf_ksym *bpf_ksym_find(unsigned long addr) 674 { 675 struct latch_tree_node *n; 676 677 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); 678 return n ? 
container_of(n, struct bpf_ksym, tnode) : NULL; 679 } 680 681 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, 682 unsigned long *off, char *sym) 683 { 684 struct bpf_ksym *ksym; 685 char *ret = NULL; 686 687 rcu_read_lock(); 688 ksym = bpf_ksym_find(addr); 689 if (ksym) { 690 unsigned long symbol_start = ksym->start; 691 unsigned long symbol_end = ksym->end; 692 693 strncpy(sym, ksym->name, KSYM_NAME_LEN); 694 695 ret = sym; 696 if (size) 697 *size = symbol_end - symbol_start; 698 if (off) 699 *off = addr - symbol_start; 700 } 701 rcu_read_unlock(); 702 703 return ret; 704 } 705 706 bool is_bpf_text_address(unsigned long addr) 707 { 708 bool ret; 709 710 rcu_read_lock(); 711 ret = bpf_ksym_find(addr) != NULL; 712 rcu_read_unlock(); 713 714 return ret; 715 } 716 717 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) 718 { 719 struct bpf_ksym *ksym = bpf_ksym_find(addr); 720 721 return ksym && ksym->prog ? 722 container_of(ksym, struct bpf_prog_aux, ksym)->prog : 723 NULL; 724 } 725 726 const struct exception_table_entry *search_bpf_extables(unsigned long addr) 727 { 728 const struct exception_table_entry *e = NULL; 729 struct bpf_prog *prog; 730 731 rcu_read_lock(); 732 prog = bpf_prog_ksym_find(addr); 733 if (!prog) 734 goto out; 735 if (!prog->aux->num_exentries) 736 goto out; 737 738 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr); 739 out: 740 rcu_read_unlock(); 741 return e; 742 } 743 744 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 745 char *sym) 746 { 747 struct bpf_ksym *ksym; 748 unsigned int it = 0; 749 int ret = -ERANGE; 750 751 if (!bpf_jit_kallsyms_enabled()) 752 return ret; 753 754 rcu_read_lock(); 755 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) { 756 if (it++ != symnum) 757 continue; 758 759 strncpy(sym, ksym->name, KSYM_NAME_LEN); 760 761 *value = ksym->start; 762 *type = BPF_SYM_ELF_TYPE; 763 764 ret = 0; 765 break; 766 } 767 rcu_read_unlock(); 768 769 return ret; 770 } 771 772 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, 773 struct bpf_jit_poke_descriptor *poke) 774 { 775 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 776 static const u32 poke_tab_max = 1024; 777 u32 slot = prog->aux->size_poke_tab; 778 u32 size = slot + 1; 779 780 if (size > poke_tab_max) 781 return -ENOSPC; 782 if (poke->tailcall_target || poke->tailcall_target_stable || 783 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr) 784 return -EINVAL; 785 786 switch (poke->reason) { 787 case BPF_POKE_REASON_TAIL_CALL: 788 if (!poke->tail_call.map) 789 return -EINVAL; 790 break; 791 default: 792 return -EINVAL; 793 } 794 795 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL); 796 if (!tab) 797 return -ENOMEM; 798 799 memcpy(&tab[slot], poke, sizeof(*poke)); 800 prog->aux->size_poke_tab = size; 801 prog->aux->poke_tab = tab; 802 803 return slot; 804 } 805 806 static atomic_long_t bpf_jit_current; 807 808 /* Can be overridden by an arch's JIT compiler if it has a custom, 809 * dedicated BPF backend memory area, or if neither of the two 810 * below apply. 811 */ 812 u64 __weak bpf_jit_alloc_exec_limit(void) 813 { 814 #if defined(MODULES_VADDR) 815 return MODULES_END - MODULES_VADDR; 816 #else 817 return VMALLOC_END - VMALLOC_START; 818 #endif 819 } 820 821 static int __init bpf_jit_charge_init(void) 822 { 823 /* Only used as heuristic here to derive limit. 
*/ 824 bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2, 825 PAGE_SIZE), LONG_MAX); 826 return 0; 827 } 828 pure_initcall(bpf_jit_charge_init); 829 830 static int bpf_jit_charge_modmem(u32 pages) 831 { 832 if (atomic_long_add_return(pages, &bpf_jit_current) > 833 (bpf_jit_limit >> PAGE_SHIFT)) { 834 if (!capable(CAP_SYS_ADMIN)) { 835 atomic_long_sub(pages, &bpf_jit_current); 836 return -EPERM; 837 } 838 } 839 840 return 0; 841 } 842 843 static void bpf_jit_uncharge_modmem(u32 pages) 844 { 845 atomic_long_sub(pages, &bpf_jit_current); 846 } 847 848 void *__weak bpf_jit_alloc_exec(unsigned long size) 849 { 850 return module_alloc(size); 851 } 852 853 void __weak bpf_jit_free_exec(void *addr) 854 { 855 module_memfree(addr); 856 } 857 858 struct bpf_binary_header * 859 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, 860 unsigned int alignment, 861 bpf_jit_fill_hole_t bpf_fill_ill_insns) 862 { 863 struct bpf_binary_header *hdr; 864 u32 size, hole, start, pages; 865 866 WARN_ON_ONCE(!is_power_of_2(alignment) || 867 alignment > BPF_IMAGE_ALIGNMENT); 868 869 /* Most of BPF filters are really small, but if some of them 870 * fill a page, allow at least 128 extra bytes to insert a 871 * random section of illegal instructions. 872 */ 873 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); 874 pages = size / PAGE_SIZE; 875 876 if (bpf_jit_charge_modmem(pages)) 877 return NULL; 878 hdr = bpf_jit_alloc_exec(size); 879 if (!hdr) { 880 bpf_jit_uncharge_modmem(pages); 881 return NULL; 882 } 883 884 /* Fill space with illegal/arch-dep instructions. */ 885 bpf_fill_ill_insns(hdr, size); 886 887 hdr->pages = pages; 888 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), 889 PAGE_SIZE - sizeof(*hdr)); 890 start = (get_random_int() % hole) & ~(alignment - 1); 891 892 /* Leave a random number of instructions before BPF code. */ 893 *image_ptr = &hdr->image[start]; 894 895 return hdr; 896 } 897 898 void bpf_jit_binary_free(struct bpf_binary_header *hdr) 899 { 900 u32 pages = hdr->pages; 901 902 bpf_jit_free_exec(hdr); 903 bpf_jit_uncharge_modmem(pages); 904 } 905 906 /* This symbol is only overridden by archs that have different 907 * requirements than the usual eBPF JITs, f.e. when they only 908 * implement cBPF JIT, do not set images read-only, etc. 909 */ 910 void __weak bpf_jit_free(struct bpf_prog *fp) 911 { 912 if (fp->jited) { 913 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); 914 915 bpf_jit_binary_free(hdr); 916 917 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); 918 } 919 920 bpf_prog_unlock_free(fp); 921 } 922 923 int bpf_jit_get_func_addr(const struct bpf_prog *prog, 924 const struct bpf_insn *insn, bool extra_pass, 925 u64 *func_addr, bool *func_addr_fixed) 926 { 927 s16 off = insn->off; 928 s32 imm = insn->imm; 929 u8 *addr; 930 931 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL; 932 if (!*func_addr_fixed) { 933 /* Place-holder address till the last pass has collected 934 * all addresses for JITed subprograms in which case we 935 * can pick them up from prog->aux. 936 */ 937 if (!extra_pass) 938 addr = NULL; 939 else if (prog->aux->func && 940 off >= 0 && off < prog->aux->func_cnt) 941 addr = (u8 *)prog->aux->func[off]->bpf_func; 942 else 943 return -EINVAL; 944 } else { 945 /* Address of a BPF helper call. Since part of the core 946 * kernel, it's always at a fixed location. __bpf_call_base 947 * and the helper with imm relative to it are both in core 948 * kernel. 
949 */ 950 addr = (u8 *)__bpf_call_base + imm; 951 } 952 953 *func_addr = (unsigned long)addr; 954 return 0; 955 } 956 957 static int bpf_jit_blind_insn(const struct bpf_insn *from, 958 const struct bpf_insn *aux, 959 struct bpf_insn *to_buff, 960 bool emit_zext) 961 { 962 struct bpf_insn *to = to_buff; 963 u32 imm_rnd = get_random_int(); 964 s16 off; 965 966 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); 967 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); 968 969 /* Constraints on AX register: 970 * 971 * AX register is inaccessible from user space. It is mapped in 972 * all JITs, and used here for constant blinding rewrites. It is 973 * typically "stateless" meaning its contents are only valid within 974 * the executed instruction, but not across several instructions. 975 * There are a few exceptions however which are further detailed 976 * below. 977 * 978 * Constant blinding is only used by JITs, not in the interpreter. 979 * The interpreter uses AX in some occasions as a local temporary 980 * register e.g. in DIV or MOD instructions. 981 * 982 * In restricted circumstances, the verifier can also use the AX 983 * register for rewrites as long as they do not interfere with 984 * the above cases! 985 */ 986 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) 987 goto out; 988 989 if (from->imm == 0 && 990 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || 991 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { 992 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); 993 goto out; 994 } 995 996 switch (from->code) { 997 case BPF_ALU | BPF_ADD | BPF_K: 998 case BPF_ALU | BPF_SUB | BPF_K: 999 case BPF_ALU | BPF_AND | BPF_K: 1000 case BPF_ALU | BPF_OR | BPF_K: 1001 case BPF_ALU | BPF_XOR | BPF_K: 1002 case BPF_ALU | BPF_MUL | BPF_K: 1003 case BPF_ALU | BPF_MOV | BPF_K: 1004 case BPF_ALU | BPF_DIV | BPF_K: 1005 case BPF_ALU | BPF_MOD | BPF_K: 1006 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1007 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1008 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX); 1009 break; 1010 1011 case BPF_ALU64 | BPF_ADD | BPF_K: 1012 case BPF_ALU64 | BPF_SUB | BPF_K: 1013 case BPF_ALU64 | BPF_AND | BPF_K: 1014 case BPF_ALU64 | BPF_OR | BPF_K: 1015 case BPF_ALU64 | BPF_XOR | BPF_K: 1016 case BPF_ALU64 | BPF_MUL | BPF_K: 1017 case BPF_ALU64 | BPF_MOV | BPF_K: 1018 case BPF_ALU64 | BPF_DIV | BPF_K: 1019 case BPF_ALU64 | BPF_MOD | BPF_K: 1020 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1021 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1022 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX); 1023 break; 1024 1025 case BPF_JMP | BPF_JEQ | BPF_K: 1026 case BPF_JMP | BPF_JNE | BPF_K: 1027 case BPF_JMP | BPF_JGT | BPF_K: 1028 case BPF_JMP | BPF_JLT | BPF_K: 1029 case BPF_JMP | BPF_JGE | BPF_K: 1030 case BPF_JMP | BPF_JLE | BPF_K: 1031 case BPF_JMP | BPF_JSGT | BPF_K: 1032 case BPF_JMP | BPF_JSLT | BPF_K: 1033 case BPF_JMP | BPF_JSGE | BPF_K: 1034 case BPF_JMP | BPF_JSLE | BPF_K: 1035 case BPF_JMP | BPF_JSET | BPF_K: 1036 /* Accommodate for extra offset in case of a backjump. 
*/ 1037 off = from->off; 1038 if (off < 0) 1039 off -= 2; 1040 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1041 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1042 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); 1043 break; 1044 1045 case BPF_JMP32 | BPF_JEQ | BPF_K: 1046 case BPF_JMP32 | BPF_JNE | BPF_K: 1047 case BPF_JMP32 | BPF_JGT | BPF_K: 1048 case BPF_JMP32 | BPF_JLT | BPF_K: 1049 case BPF_JMP32 | BPF_JGE | BPF_K: 1050 case BPF_JMP32 | BPF_JLE | BPF_K: 1051 case BPF_JMP32 | BPF_JSGT | BPF_K: 1052 case BPF_JMP32 | BPF_JSLT | BPF_K: 1053 case BPF_JMP32 | BPF_JSGE | BPF_K: 1054 case BPF_JMP32 | BPF_JSLE | BPF_K: 1055 case BPF_JMP32 | BPF_JSET | BPF_K: 1056 /* Accommodate for extra offset in case of a backjump. */ 1057 off = from->off; 1058 if (off < 0) 1059 off -= 2; 1060 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1061 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1062 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, 1063 off); 1064 break; 1065 1066 case BPF_LD | BPF_IMM | BPF_DW: 1067 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); 1068 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1069 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 1070 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX); 1071 break; 1072 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ 1073 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1074 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1075 if (emit_zext) 1076 *to++ = BPF_ZEXT_REG(BPF_REG_AX); 1077 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1078 break; 1079 1080 case BPF_ST | BPF_MEM | BPF_DW: 1081 case BPF_ST | BPF_MEM | BPF_W: 1082 case BPF_ST | BPF_MEM | BPF_H: 1083 case BPF_ST | BPF_MEM | BPF_B: 1084 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1085 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1086 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); 1087 break; 1088 } 1089 out: 1090 return to - to_buff; 1091 } 1092 1093 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, 1094 gfp_t gfp_extra_flags) 1095 { 1096 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 1097 struct bpf_prog *fp; 1098 1099 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); 1100 if (fp != NULL) { 1101 /* aux->prog still points to the fp_other one, so 1102 * when promoting the clone to the real program, 1103 * this still needs to be adapted. 1104 */ 1105 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); 1106 } 1107 1108 return fp; 1109 } 1110 1111 static void bpf_prog_clone_free(struct bpf_prog *fp) 1112 { 1113 /* aux was stolen by the other clone, so we cannot free 1114 * it from this path! It will be freed eventually by the 1115 * other program on release. 1116 * 1117 * At this point, we don't need a deferred release since 1118 * clone is guaranteed to not be locked. 1119 */ 1120 fp->aux = NULL; 1121 __bpf_prog_free(fp); 1122 } 1123 1124 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) 1125 { 1126 /* We have to repoint aux->prog to self, as we don't 1127 * know whether fp here is the clone or the original. 
1128 */ 1129 fp->aux->prog = fp; 1130 bpf_prog_clone_free(fp_other); 1131 } 1132 1133 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) 1134 { 1135 struct bpf_insn insn_buff[16], aux[2]; 1136 struct bpf_prog *clone, *tmp; 1137 int insn_delta, insn_cnt; 1138 struct bpf_insn *insn; 1139 int i, rewritten; 1140 1141 if (!bpf_jit_blinding_enabled(prog) || prog->blinded) 1142 return prog; 1143 1144 clone = bpf_prog_clone_create(prog, GFP_USER); 1145 if (!clone) 1146 return ERR_PTR(-ENOMEM); 1147 1148 insn_cnt = clone->len; 1149 insn = clone->insnsi; 1150 1151 for (i = 0; i < insn_cnt; i++, insn++) { 1152 /* We temporarily need to hold the original ld64 insn 1153 * so that we can still access the first part in the 1154 * second blinding run. 1155 */ 1156 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) && 1157 insn[1].code == 0) 1158 memcpy(aux, insn, sizeof(aux)); 1159 1160 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, 1161 clone->aux->verifier_zext); 1162 if (!rewritten) 1163 continue; 1164 1165 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); 1166 if (IS_ERR(tmp)) { 1167 /* Patching may have repointed aux->prog during 1168 * realloc from the original one, so we need to 1169 * fix it up here on error. 1170 */ 1171 bpf_jit_prog_release_other(prog, clone); 1172 return tmp; 1173 } 1174 1175 clone = tmp; 1176 insn_delta = rewritten - 1; 1177 1178 /* Walk new program and skip insns we just inserted. */ 1179 insn = clone->insnsi + i + insn_delta; 1180 insn_cnt += insn_delta; 1181 i += insn_delta; 1182 } 1183 1184 clone->blinded = 1; 1185 return clone; 1186 } 1187 #endif /* CONFIG_BPF_JIT */ 1188 1189 /* Base function for offset calculation. Needs to go into .text section, 1190 * therefore keeping it non-static as well; will also be used by JITs 1191 * anyway later on, so do not let the compiler omit it. This also needs 1192 * to go into kallsyms for correlation from e.g. bpftool, so naming 1193 * must not change. 1194 */ 1195 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 1196 { 1197 return 0; 1198 } 1199 EXPORT_SYMBOL_GPL(__bpf_call_base); 1200 1201 /* All UAPI available opcodes. */ 1202 #define BPF_INSN_MAP(INSN_2, INSN_3) \ 1203 /* 32 bit ALU operations. */ \ 1204 /* Register based. */ \ 1205 INSN_3(ALU, ADD, X), \ 1206 INSN_3(ALU, SUB, X), \ 1207 INSN_3(ALU, AND, X), \ 1208 INSN_3(ALU, OR, X), \ 1209 INSN_3(ALU, LSH, X), \ 1210 INSN_3(ALU, RSH, X), \ 1211 INSN_3(ALU, XOR, X), \ 1212 INSN_3(ALU, MUL, X), \ 1213 INSN_3(ALU, MOV, X), \ 1214 INSN_3(ALU, ARSH, X), \ 1215 INSN_3(ALU, DIV, X), \ 1216 INSN_3(ALU, MOD, X), \ 1217 INSN_2(ALU, NEG), \ 1218 INSN_3(ALU, END, TO_BE), \ 1219 INSN_3(ALU, END, TO_LE), \ 1220 /* Immediate based. */ \ 1221 INSN_3(ALU, ADD, K), \ 1222 INSN_3(ALU, SUB, K), \ 1223 INSN_3(ALU, AND, K), \ 1224 INSN_3(ALU, OR, K), \ 1225 INSN_3(ALU, LSH, K), \ 1226 INSN_3(ALU, RSH, K), \ 1227 INSN_3(ALU, XOR, K), \ 1228 INSN_3(ALU, MUL, K), \ 1229 INSN_3(ALU, MOV, K), \ 1230 INSN_3(ALU, ARSH, K), \ 1231 INSN_3(ALU, DIV, K), \ 1232 INSN_3(ALU, MOD, K), \ 1233 /* 64 bit ALU operations. */ \ 1234 /* Register based. */ \ 1235 INSN_3(ALU64, ADD, X), \ 1236 INSN_3(ALU64, SUB, X), \ 1237 INSN_3(ALU64, AND, X), \ 1238 INSN_3(ALU64, OR, X), \ 1239 INSN_3(ALU64, LSH, X), \ 1240 INSN_3(ALU64, RSH, X), \ 1241 INSN_3(ALU64, XOR, X), \ 1242 INSN_3(ALU64, MUL, X), \ 1243 INSN_3(ALU64, MOV, X), \ 1244 INSN_3(ALU64, ARSH, X), \ 1245 INSN_3(ALU64, DIV, X), \ 1246 INSN_3(ALU64, MOD, X), \ 1247 INSN_2(ALU64, NEG), \ 1248 /* Immediate based. 
*/ \ 1249 INSN_3(ALU64, ADD, K), \ 1250 INSN_3(ALU64, SUB, K), \ 1251 INSN_3(ALU64, AND, K), \ 1252 INSN_3(ALU64, OR, K), \ 1253 INSN_3(ALU64, LSH, K), \ 1254 INSN_3(ALU64, RSH, K), \ 1255 INSN_3(ALU64, XOR, K), \ 1256 INSN_3(ALU64, MUL, K), \ 1257 INSN_3(ALU64, MOV, K), \ 1258 INSN_3(ALU64, ARSH, K), \ 1259 INSN_3(ALU64, DIV, K), \ 1260 INSN_3(ALU64, MOD, K), \ 1261 /* Call instruction. */ \ 1262 INSN_2(JMP, CALL), \ 1263 /* Exit instruction. */ \ 1264 INSN_2(JMP, EXIT), \ 1265 /* 32-bit Jump instructions. */ \ 1266 /* Register based. */ \ 1267 INSN_3(JMP32, JEQ, X), \ 1268 INSN_3(JMP32, JNE, X), \ 1269 INSN_3(JMP32, JGT, X), \ 1270 INSN_3(JMP32, JLT, X), \ 1271 INSN_3(JMP32, JGE, X), \ 1272 INSN_3(JMP32, JLE, X), \ 1273 INSN_3(JMP32, JSGT, X), \ 1274 INSN_3(JMP32, JSLT, X), \ 1275 INSN_3(JMP32, JSGE, X), \ 1276 INSN_3(JMP32, JSLE, X), \ 1277 INSN_3(JMP32, JSET, X), \ 1278 /* Immediate based. */ \ 1279 INSN_3(JMP32, JEQ, K), \ 1280 INSN_3(JMP32, JNE, K), \ 1281 INSN_3(JMP32, JGT, K), \ 1282 INSN_3(JMP32, JLT, K), \ 1283 INSN_3(JMP32, JGE, K), \ 1284 INSN_3(JMP32, JLE, K), \ 1285 INSN_3(JMP32, JSGT, K), \ 1286 INSN_3(JMP32, JSLT, K), \ 1287 INSN_3(JMP32, JSGE, K), \ 1288 INSN_3(JMP32, JSLE, K), \ 1289 INSN_3(JMP32, JSET, K), \ 1290 /* Jump instructions. */ \ 1291 /* Register based. */ \ 1292 INSN_3(JMP, JEQ, X), \ 1293 INSN_3(JMP, JNE, X), \ 1294 INSN_3(JMP, JGT, X), \ 1295 INSN_3(JMP, JLT, X), \ 1296 INSN_3(JMP, JGE, X), \ 1297 INSN_3(JMP, JLE, X), \ 1298 INSN_3(JMP, JSGT, X), \ 1299 INSN_3(JMP, JSLT, X), \ 1300 INSN_3(JMP, JSGE, X), \ 1301 INSN_3(JMP, JSLE, X), \ 1302 INSN_3(JMP, JSET, X), \ 1303 /* Immediate based. */ \ 1304 INSN_3(JMP, JEQ, K), \ 1305 INSN_3(JMP, JNE, K), \ 1306 INSN_3(JMP, JGT, K), \ 1307 INSN_3(JMP, JLT, K), \ 1308 INSN_3(JMP, JGE, K), \ 1309 INSN_3(JMP, JLE, K), \ 1310 INSN_3(JMP, JSGT, K), \ 1311 INSN_3(JMP, JSLT, K), \ 1312 INSN_3(JMP, JSGE, K), \ 1313 INSN_3(JMP, JSLE, K), \ 1314 INSN_3(JMP, JSET, K), \ 1315 INSN_2(JMP, JA), \ 1316 /* Store instructions. */ \ 1317 /* Register based. */ \ 1318 INSN_3(STX, MEM, B), \ 1319 INSN_3(STX, MEM, H), \ 1320 INSN_3(STX, MEM, W), \ 1321 INSN_3(STX, MEM, DW), \ 1322 INSN_3(STX, ATOMIC, W), \ 1323 INSN_3(STX, ATOMIC, DW), \ 1324 /* Immediate based. */ \ 1325 INSN_3(ST, MEM, B), \ 1326 INSN_3(ST, MEM, H), \ 1327 INSN_3(ST, MEM, W), \ 1328 INSN_3(ST, MEM, DW), \ 1329 /* Load instructions. */ \ 1330 /* Register based. */ \ 1331 INSN_3(LDX, MEM, B), \ 1332 INSN_3(LDX, MEM, H), \ 1333 INSN_3(LDX, MEM, W), \ 1334 INSN_3(LDX, MEM, DW), \ 1335 /* Immediate based. */ \ 1336 INSN_3(LD, IMM, DW) 1337 1338 bool bpf_opcode_in_insntable(u8 code) 1339 { 1340 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true 1341 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true 1342 static const bool public_insntable[256] = { 1343 [0 ... 255] = false, 1344 /* Now overwrite non-defaults ... */ 1345 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL), 1346 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. 
*/ 1347 [BPF_LD | BPF_ABS | BPF_B] = true, 1348 [BPF_LD | BPF_ABS | BPF_H] = true, 1349 [BPF_LD | BPF_ABS | BPF_W] = true, 1350 [BPF_LD | BPF_IND | BPF_B] = true, 1351 [BPF_LD | BPF_IND | BPF_H] = true, 1352 [BPF_LD | BPF_IND | BPF_W] = true, 1353 }; 1354 #undef BPF_INSN_3_TBL 1355 #undef BPF_INSN_2_TBL 1356 return public_insntable[code]; 1357 } 1358 1359 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1360 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) 1361 { 1362 memset(dst, 0, size); 1363 return -EFAULT; 1364 } 1365 1366 /** 1367 * __bpf_prog_run - run eBPF program on a given context 1368 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers 1369 * @insn: is the array of eBPF instructions 1370 * @stack: is the eBPF storage stack 1371 * 1372 * Decode and execute eBPF instructions. 1373 */ 1374 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) 1375 { 1376 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y 1377 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z 1378 static const void * const jumptable[256] __annotate_jump_table = { 1379 [0 ... 255] = &&default_label, 1380 /* Now overwrite non-defaults ... */ 1381 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL), 1382 /* Non-UAPI available opcodes. */ 1383 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, 1384 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, 1385 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B, 1386 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H, 1387 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W, 1388 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW, 1389 }; 1390 #undef BPF_INSN_3_LBL 1391 #undef BPF_INSN_2_LBL 1392 u32 tail_call_cnt = 0; 1393 1394 #define CONT ({ insn++; goto select_insn; }) 1395 #define CONT_JMP ({ insn++; goto select_insn; }) 1396 1397 select_insn: 1398 goto *jumptable[insn->code]; 1399 1400 /* ALU */ 1401 #define ALU(OPCODE, OP) \ 1402 ALU64_##OPCODE##_X: \ 1403 DST = DST OP SRC; \ 1404 CONT; \ 1405 ALU_##OPCODE##_X: \ 1406 DST = (u32) DST OP (u32) SRC; \ 1407 CONT; \ 1408 ALU64_##OPCODE##_K: \ 1409 DST = DST OP IMM; \ 1410 CONT; \ 1411 ALU_##OPCODE##_K: \ 1412 DST = (u32) DST OP (u32) IMM; \ 1413 CONT; 1414 1415 ALU(ADD, +) 1416 ALU(SUB, -) 1417 ALU(AND, &) 1418 ALU(OR, |) 1419 ALU(LSH, <<) 1420 ALU(RSH, >>) 1421 ALU(XOR, ^) 1422 ALU(MUL, *) 1423 #undef ALU 1424 ALU_NEG: 1425 DST = (u32) -DST; 1426 CONT; 1427 ALU64_NEG: 1428 DST = -DST; 1429 CONT; 1430 ALU_MOV_X: 1431 DST = (u32) SRC; 1432 CONT; 1433 ALU_MOV_K: 1434 DST = (u32) IMM; 1435 CONT; 1436 ALU64_MOV_X: 1437 DST = SRC; 1438 CONT; 1439 ALU64_MOV_K: 1440 DST = IMM; 1441 CONT; 1442 LD_IMM_DW: 1443 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; 1444 insn++; 1445 CONT; 1446 ALU_ARSH_X: 1447 DST = (u64) (u32) (((s32) DST) >> SRC); 1448 CONT; 1449 ALU_ARSH_K: 1450 DST = (u64) (u32) (((s32) DST) >> IMM); 1451 CONT; 1452 ALU64_ARSH_X: 1453 (*(s64 *) &DST) >>= SRC; 1454 CONT; 1455 ALU64_ARSH_K: 1456 (*(s64 *) &DST) >>= IMM; 1457 CONT; 1458 ALU64_MOD_X: 1459 div64_u64_rem(DST, SRC, &AX); 1460 DST = AX; 1461 CONT; 1462 ALU_MOD_X: 1463 AX = (u32) DST; 1464 DST = do_div(AX, (u32) SRC); 1465 CONT; 1466 ALU64_MOD_K: 1467 div64_u64_rem(DST, IMM, &AX); 1468 DST = AX; 1469 CONT; 1470 ALU_MOD_K: 1471 AX = (u32) DST; 1472 DST = do_div(AX, (u32) IMM); 1473 CONT; 1474 ALU64_DIV_X: 1475 DST = div64_u64(DST, SRC); 1476 CONT; 1477 ALU_DIV_X: 1478 AX = (u32) DST; 1479 do_div(AX, (u32) SRC); 1480 DST = (u32) AX; 1481 CONT; 1482 ALU64_DIV_K: 1483 
DST = div64_u64(DST, IMM); 1484 CONT; 1485 ALU_DIV_K: 1486 AX = (u32) DST; 1487 do_div(AX, (u32) IMM); 1488 DST = (u32) AX; 1489 CONT; 1490 ALU_END_TO_BE: 1491 switch (IMM) { 1492 case 16: 1493 DST = (__force u16) cpu_to_be16(DST); 1494 break; 1495 case 32: 1496 DST = (__force u32) cpu_to_be32(DST); 1497 break; 1498 case 64: 1499 DST = (__force u64) cpu_to_be64(DST); 1500 break; 1501 } 1502 CONT; 1503 ALU_END_TO_LE: 1504 switch (IMM) { 1505 case 16: 1506 DST = (__force u16) cpu_to_le16(DST); 1507 break; 1508 case 32: 1509 DST = (__force u32) cpu_to_le32(DST); 1510 break; 1511 case 64: 1512 DST = (__force u64) cpu_to_le64(DST); 1513 break; 1514 } 1515 CONT; 1516 1517 /* CALL */ 1518 JMP_CALL: 1519 /* Function call scratches BPF_R1-BPF_R5 registers, 1520 * preserves BPF_R6-BPF_R9, and stores return value 1521 * into BPF_R0. 1522 */ 1523 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, 1524 BPF_R4, BPF_R5); 1525 CONT; 1526 1527 JMP_CALL_ARGS: 1528 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, 1529 BPF_R3, BPF_R4, 1530 BPF_R5, 1531 insn + insn->off + 1); 1532 CONT; 1533 1534 JMP_TAIL_CALL: { 1535 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; 1536 struct bpf_array *array = container_of(map, struct bpf_array, map); 1537 struct bpf_prog *prog; 1538 u32 index = BPF_R3; 1539 1540 if (unlikely(index >= array->map.max_entries)) 1541 goto out; 1542 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT)) 1543 goto out; 1544 1545 tail_call_cnt++; 1546 1547 prog = READ_ONCE(array->ptrs[index]); 1548 if (!prog) 1549 goto out; 1550 1551 /* ARG1 at this point is guaranteed to point to CTX from 1552 * the verifier side due to the fact that the tail call is 1553 * handled like a helper, that is, bpf_tail_call_proto, 1554 * where arg1_type is ARG_PTR_TO_CTX. 
1555 */ 1556 insn = prog->insnsi; 1557 goto select_insn; 1558 out: 1559 CONT; 1560 } 1561 JMP_JA: 1562 insn += insn->off; 1563 CONT; 1564 JMP_EXIT: 1565 return BPF_R0; 1566 /* JMP */ 1567 #define COND_JMP(SIGN, OPCODE, CMP_OP) \ 1568 JMP_##OPCODE##_X: \ 1569 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \ 1570 insn += insn->off; \ 1571 CONT_JMP; \ 1572 } \ 1573 CONT; \ 1574 JMP32_##OPCODE##_X: \ 1575 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \ 1576 insn += insn->off; \ 1577 CONT_JMP; \ 1578 } \ 1579 CONT; \ 1580 JMP_##OPCODE##_K: \ 1581 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \ 1582 insn += insn->off; \ 1583 CONT_JMP; \ 1584 } \ 1585 CONT; \ 1586 JMP32_##OPCODE##_K: \ 1587 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \ 1588 insn += insn->off; \ 1589 CONT_JMP; \ 1590 } \ 1591 CONT; 1592 COND_JMP(u, JEQ, ==) 1593 COND_JMP(u, JNE, !=) 1594 COND_JMP(u, JGT, >) 1595 COND_JMP(u, JLT, <) 1596 COND_JMP(u, JGE, >=) 1597 COND_JMP(u, JLE, <=) 1598 COND_JMP(u, JSET, &) 1599 COND_JMP(s, JSGT, >) 1600 COND_JMP(s, JSLT, <) 1601 COND_JMP(s, JSGE, >=) 1602 COND_JMP(s, JSLE, <=) 1603 #undef COND_JMP 1604 /* STX and ST and LDX*/ 1605 #define LDST(SIZEOP, SIZE) \ 1606 STX_MEM_##SIZEOP: \ 1607 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ 1608 CONT; \ 1609 ST_MEM_##SIZEOP: \ 1610 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ 1611 CONT; \ 1612 LDX_MEM_##SIZEOP: \ 1613 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 1614 CONT; 1615 1616 LDST(B, u8) 1617 LDST(H, u16) 1618 LDST(W, u32) 1619 LDST(DW, u64) 1620 #undef LDST 1621 #define LDX_PROBE(SIZEOP, SIZE) \ 1622 LDX_PROBE_MEM_##SIZEOP: \ 1623 bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \ 1624 CONT; 1625 LDX_PROBE(B, 1) 1626 LDX_PROBE(H, 2) 1627 LDX_PROBE(W, 4) 1628 LDX_PROBE(DW, 8) 1629 #undef LDX_PROBE 1630 1631 #define ATOMIC_ALU_OP(BOP, KOP) \ 1632 case BOP: \ 1633 if (BPF_SIZE(insn->code) == BPF_W) \ 1634 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \ 1635 (DST + insn->off)); \ 1636 else \ 1637 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \ 1638 (DST + insn->off)); \ 1639 break; \ 1640 case BOP | BPF_FETCH: \ 1641 if (BPF_SIZE(insn->code) == BPF_W) \ 1642 SRC = (u32) atomic_fetch_##KOP( \ 1643 (u32) SRC, \ 1644 (atomic_t *)(unsigned long) (DST + insn->off)); \ 1645 else \ 1646 SRC = (u64) atomic64_fetch_##KOP( \ 1647 (u64) SRC, \ 1648 (atomic64_t *)(unsigned long) (DST + insn->off)); \ 1649 break; 1650 1651 STX_ATOMIC_DW: 1652 STX_ATOMIC_W: 1653 switch (IMM) { 1654 ATOMIC_ALU_OP(BPF_ADD, add) 1655 ATOMIC_ALU_OP(BPF_AND, and) 1656 ATOMIC_ALU_OP(BPF_OR, or) 1657 ATOMIC_ALU_OP(BPF_XOR, xor) 1658 #undef ATOMIC_ALU_OP 1659 1660 case BPF_XCHG: 1661 if (BPF_SIZE(insn->code) == BPF_W) 1662 SRC = (u32) atomic_xchg( 1663 (atomic_t *)(unsigned long) (DST + insn->off), 1664 (u32) SRC); 1665 else 1666 SRC = (u64) atomic64_xchg( 1667 (atomic64_t *)(unsigned long) (DST + insn->off), 1668 (u64) SRC); 1669 break; 1670 case BPF_CMPXCHG: 1671 if (BPF_SIZE(insn->code) == BPF_W) 1672 BPF_R0 = (u32) atomic_cmpxchg( 1673 (atomic_t *)(unsigned long) (DST + insn->off), 1674 (u32) BPF_R0, (u32) SRC); 1675 else 1676 BPF_R0 = (u64) atomic64_cmpxchg( 1677 (atomic64_t *)(unsigned long) (DST + insn->off), 1678 (u64) BPF_R0, (u64) SRC); 1679 break; 1680 1681 default: 1682 goto default_label; 1683 } 1684 CONT; 1685 1686 default_label: 1687 /* If we ever reach this, we have a bug somewhere. 
Die hard here 1688 * instead of just returning 0; we could be somewhere in a subprog, 1689 * so execution could continue otherwise which we do /not/ want. 1690 * 1691 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable(). 1692 */ 1693 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n", 1694 insn->code, insn->imm); 1695 BUG_ON(1); 1696 return 0; 1697 } 1698 1699 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size 1700 #define DEFINE_BPF_PROG_RUN(stack_size) \ 1701 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ 1702 { \ 1703 u64 stack[stack_size / sizeof(u64)]; \ 1704 u64 regs[MAX_BPF_EXT_REG]; \ 1705 \ 1706 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 1707 ARG1 = (u64) (unsigned long) ctx; \ 1708 return ___bpf_prog_run(regs, insn, stack); \ 1709 } 1710 1711 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size 1712 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \ 1713 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ 1714 const struct bpf_insn *insn) \ 1715 { \ 1716 u64 stack[stack_size / sizeof(u64)]; \ 1717 u64 regs[MAX_BPF_EXT_REG]; \ 1718 \ 1719 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 1720 BPF_R1 = r1; \ 1721 BPF_R2 = r2; \ 1722 BPF_R3 = r3; \ 1723 BPF_R4 = r4; \ 1724 BPF_R5 = r5; \ 1725 return ___bpf_prog_run(regs, insn, stack); \ 1726 } 1727 1728 #define EVAL1(FN, X) FN(X) 1729 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y) 1730 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y) 1731 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y) 1732 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y) 1733 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y) 1734 1735 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192); 1736 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384); 1737 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512); 1738 1739 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192); 1740 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384); 1741 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512); 1742 1743 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size), 1744 1745 static unsigned int (*interpreters[])(const void *ctx, 1746 const struct bpf_insn *insn) = { 1747 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 1748 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 1749 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 1750 }; 1751 #undef PROG_NAME_LIST 1752 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size), 1753 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, 1754 const struct bpf_insn *insn) = { 1755 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 1756 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 1757 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 1758 }; 1759 #undef PROG_NAME_LIST 1760 1761 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) 1762 { 1763 stack_depth = max_t(u32, stack_depth, 1); 1764 insn->off = (s16) insn->imm; 1765 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] - 1766 __bpf_call_base_args; 1767 insn->code = BPF_JMP | BPF_CALL_ARGS; 1768 } 1769 1770 #else 1771 static unsigned int __bpf_prog_ret0_warn(const void *ctx, 1772 const struct bpf_insn *insn) 1773 { 1774 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON 1775 * is not working properly, so warn about it! 
1776 */ 1777 WARN_ON_ONCE(1); 1778 return 0; 1779 } 1780 #endif 1781 1782 bool bpf_prog_array_compatible(struct bpf_array *array, 1783 const struct bpf_prog *fp) 1784 { 1785 if (fp->kprobe_override) 1786 return false; 1787 1788 if (!array->aux->type) { 1789 /* There's no owner yet where we could check for 1790 * compatibility. 1791 */ 1792 array->aux->type = fp->type; 1793 array->aux->jited = fp->jited; 1794 return true; 1795 } 1796 1797 return array->aux->type == fp->type && 1798 array->aux->jited == fp->jited; 1799 } 1800 1801 static int bpf_check_tail_call(const struct bpf_prog *fp) 1802 { 1803 struct bpf_prog_aux *aux = fp->aux; 1804 int i, ret = 0; 1805 1806 mutex_lock(&aux->used_maps_mutex); 1807 for (i = 0; i < aux->used_map_cnt; i++) { 1808 struct bpf_map *map = aux->used_maps[i]; 1809 struct bpf_array *array; 1810 1811 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 1812 continue; 1813 1814 array = container_of(map, struct bpf_array, map); 1815 if (!bpf_prog_array_compatible(array, fp)) { 1816 ret = -EINVAL; 1817 goto out; 1818 } 1819 } 1820 1821 out: 1822 mutex_unlock(&aux->used_maps_mutex); 1823 return ret; 1824 } 1825 1826 static void bpf_prog_select_func(struct bpf_prog *fp) 1827 { 1828 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1829 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 1830 1831 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; 1832 #else 1833 fp->bpf_func = __bpf_prog_ret0_warn; 1834 #endif 1835 } 1836 1837 /** 1838 * bpf_prog_select_runtime - select exec runtime for BPF program 1839 * @fp: bpf_prog populated with internal BPF program 1840 * @err: pointer to error variable 1841 * 1842 * Try to JIT eBPF program, if JIT is not available, use interpreter. 1843 * The BPF program will be executed via BPF_PROG_RUN() macro. 1844 */ 1845 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 1846 { 1847 /* In case of BPF to BPF calls, verifier did all the prep 1848 * work with regards to JITing, etc. 1849 */ 1850 if (fp->bpf_func) 1851 goto finalize; 1852 1853 bpf_prog_select_func(fp); 1854 1855 /* eBPF JITs can rewrite the program in case constant 1856 * blinding is active. However, in case of error during 1857 * blinding, bpf_int_jit_compile() must always return a 1858 * valid program, which in this case would simply not 1859 * be JITed, but falls back to the interpreter. 1860 */ 1861 if (!bpf_prog_is_dev_bound(fp->aux)) { 1862 *err = bpf_prog_alloc_jited_linfo(fp); 1863 if (*err) 1864 return fp; 1865 1866 fp = bpf_int_jit_compile(fp); 1867 if (!fp->jited) { 1868 bpf_prog_free_jited_linfo(fp); 1869 #ifdef CONFIG_BPF_JIT_ALWAYS_ON 1870 *err = -ENOTSUPP; 1871 return fp; 1872 #endif 1873 } else { 1874 bpf_prog_free_unused_jited_linfo(fp); 1875 } 1876 } else { 1877 *err = bpf_prog_offload_compile(fp); 1878 if (*err) 1879 return fp; 1880 } 1881 1882 finalize: 1883 bpf_prog_lock_ro(fp); 1884 1885 /* The tail call compatibility check can only be done at 1886 * this late stage as we need to determine, if we deal 1887 * with JITed or non JITed program concatenations and not 1888 * all eBPF JITs might immediately support all features. 
1889 */ 1890 *err = bpf_check_tail_call(fp); 1891 1892 return fp; 1893 } 1894 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); 1895 1896 static unsigned int __bpf_prog_ret1(const void *ctx, 1897 const struct bpf_insn *insn) 1898 { 1899 return 1; 1900 } 1901 1902 static struct bpf_prog_dummy { 1903 struct bpf_prog prog; 1904 } dummy_bpf_prog = { 1905 .prog = { 1906 .bpf_func = __bpf_prog_ret1, 1907 }, 1908 }; 1909 1910 /* to avoid allocating empty bpf_prog_array for cgroups that 1911 * don't have bpf program attached use one global 'empty_prog_array' 1912 * It will not be modified the caller of bpf_prog_array_alloc() 1913 * (since caller requested prog_cnt == 0) 1914 * that pointer should be 'freed' by bpf_prog_array_free() 1915 */ 1916 static struct { 1917 struct bpf_prog_array hdr; 1918 struct bpf_prog *null_prog; 1919 } empty_prog_array = { 1920 .null_prog = NULL, 1921 }; 1922 1923 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) 1924 { 1925 if (prog_cnt) 1926 return kzalloc(sizeof(struct bpf_prog_array) + 1927 sizeof(struct bpf_prog_array_item) * 1928 (prog_cnt + 1), 1929 flags); 1930 1931 return &empty_prog_array.hdr; 1932 } 1933 1934 void bpf_prog_array_free(struct bpf_prog_array *progs) 1935 { 1936 if (!progs || progs == &empty_prog_array.hdr) 1937 return; 1938 kfree_rcu(progs, rcu); 1939 } 1940 1941 int bpf_prog_array_length(struct bpf_prog_array *array) 1942 { 1943 struct bpf_prog_array_item *item; 1944 u32 cnt = 0; 1945 1946 for (item = array->items; item->prog; item++) 1947 if (item->prog != &dummy_bpf_prog.prog) 1948 cnt++; 1949 return cnt; 1950 } 1951 1952 bool bpf_prog_array_is_empty(struct bpf_prog_array *array) 1953 { 1954 struct bpf_prog_array_item *item; 1955 1956 for (item = array->items; item->prog; item++) 1957 if (item->prog != &dummy_bpf_prog.prog) 1958 return false; 1959 return true; 1960 } 1961 1962 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, 1963 u32 *prog_ids, 1964 u32 request_cnt) 1965 { 1966 struct bpf_prog_array_item *item; 1967 int i = 0; 1968 1969 for (item = array->items; item->prog; item++) { 1970 if (item->prog == &dummy_bpf_prog.prog) 1971 continue; 1972 prog_ids[i] = item->prog->aux->id; 1973 if (++i == request_cnt) { 1974 item++; 1975 break; 1976 } 1977 } 1978 1979 return !!(item->prog); 1980 } 1981 1982 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, 1983 __u32 __user *prog_ids, u32 cnt) 1984 { 1985 unsigned long err = 0; 1986 bool nospc; 1987 u32 *ids; 1988 1989 /* users of this function are doing: 1990 * cnt = bpf_prog_array_length(); 1991 * if (cnt > 0) 1992 * bpf_prog_array_copy_to_user(..., cnt); 1993 * so below kcalloc doesn't need extra cnt > 0 check. 1994 */ 1995 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); 1996 if (!ids) 1997 return -ENOMEM; 1998 nospc = bpf_prog_array_copy_core(array, ids, cnt); 1999 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); 2000 kfree(ids); 2001 if (err) 2002 return -EFAULT; 2003 if (nospc) 2004 return -ENOSPC; 2005 return 0; 2006 } 2007 2008 void bpf_prog_array_delete_safe(struct bpf_prog_array *array, 2009 struct bpf_prog *old_prog) 2010 { 2011 struct bpf_prog_array_item *item; 2012 2013 for (item = array->items; item->prog; item++) 2014 if (item->prog == old_prog) { 2015 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); 2016 break; 2017 } 2018 } 2019 2020 /** 2021 * bpf_prog_array_delete_safe_at() - Replaces the program at the given 2022 * index into the program array with 2023 * a dummy no-op program. 
void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

/**
 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
 *                                   index into the program array with
 *                                   a dummy no-op program.
 * @array: a bpf_prog_array
 * @index: the index of the program to replace
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to replace.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
{
	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
}

/**
 * bpf_prog_array_update_at() - Updates the program at the given index
 *                              into the program array.
 * @array: a bpf_prog_array
 * @index: the index of the program to update
 * @prog: the program to insert into the array
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to update.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog)
{
	struct bpf_prog_array_item *item;

	if (unlikely(index < 0))
		return -EINVAL;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		if (!index) {
			WRITE_ONCE(item->prog, prog);
			return 0;
		}
		index--;
	}
	return -ENOENT;
}
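/* Worked example (added for illustration, not in the original file): with
 * array->items holding { dummy, A, B, NULL }, a call such as
 *
 *	bpf_prog_array_update_at(array, 1, C);
 *
 * replaces B rather than A, because the leading dummy program is skipped
 * and does not consume an index. Likewise bpf_prog_array_delete_safe_at()
 * counts only non-dummy entries when resolving @index.
 */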
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}
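/* Illustrative summary of the copy semantics above (added commentary, not
 * part of the original file): starting from old_array = { A, dummy, B },
 *
 *	bpf_prog_array_copy(old_array, B, C, &new_array);
 *
 * yields new_array = { A, C, NULL }: B is excluded, dummy entries are
 * dropped, and C is appended at the end. Passing an exclude_prog that is
 * not present returns -ENOENT, an include_prog that is already present
 * returns -EEXIST, and an empty result sets *new_array to NULL.
 */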
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								     : 0;
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	u32 i;

	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len)
{
#ifdef CONFIG_BPF_SYSCALL
	struct btf_mod_pair *btf_mod;
	u32 i;

	for (i = 0; i < len; i++) {
		btf_mod = &used_btfs[i];
		if (btf_mod->module)
			module_put(btf_mod->module);
		btf_put(btf_mod->btf);
	}
#endif
}

static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
{
	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
	kfree(aux->used_btfs);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_free_used_maps(aux);
	bpf_free_used_btfs(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	if (aux->dst_trampoline)
		bpf_trampoline_put(aux->dst_trampoline);
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->dst_prog)
		bpf_prog_put(aux->dst_prog);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
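/* Added commentary (not in the original file): .func is intentionally NULL
 * in the proto above. bpf_tail_call() is never dispatched like an ordinary
 * helper through a function pointer; the verifier, interpreter and JITs
 * recognize tail calls and emit the jump into the target program inline,
 * so the proto only needs to describe the argument and return types for
 * verification purposes.
 */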
/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);