// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 * Jay Schulist <jschlst@samba.org>
 * Alexei Starovoitov <ast@plumgrid.com>
 * Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/nospec.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0 regs[BPF_REG_0]
#define BPF_R1 regs[BPF_REG_1]
#define BPF_R2 regs[BPF_REG_2]
#define BPF_R3 regs[BPF_REG_3]
#define BPF_R4 regs[BPF_REG_4]
#define BPF_R5 regs[BPF_REG_5]
#define BPF_R6 regs[BPF_REG_6]
#define BPF_R7 regs[BPF_REG_7]
#define BPF_R8 regs[BPF_REG_8]
#define BPF_R9 regs[BPF_REG_9]
#define BPF_R10 regs[BPF_REG_10]

/* Named registers */
#define DST regs[insn->dst_reg]
#define SRC regs[insn->src_reg]
#define FP regs[BPF_REG_FP]
#define AX regs[BPF_REG_AX]
#define ARG1 regs[BPF_REG_ARG1]
#define CTX regs[BPF_REG_CTX]
#define OFF insn->off
#define IMM insn->imm

struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
        u8 *ptr = NULL;

        if (k >= SKF_NET_OFF) {
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        } else if (k >= SKF_LL_OFF) {
                if (unlikely(!skb_mac_header_was_set(skb)))
                        return NULL;
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
        }
        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;

        return NULL;
}

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
        struct bpf_prog_aux *aux;
        struct bpf_prog *fp;

        size = round_up(size, PAGE_SIZE);
        fp = __vmalloc(size, gfp_flags);
        if (fp == NULL)
                return NULL;

        aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
        if (aux == NULL) {
                vfree(fp);
                return NULL;
        }
        fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
        if (!fp->active) {
                vfree(fp);
                kfree(aux);
                return NULL;
        }

        fp->pages = size / PAGE_SIZE;
        fp->aux = aux;
        fp->aux->prog = fp;
        fp->jit_requested = ebpf_jit_enabled();
        fp->blinding_requested = bpf_jit_blinding_enabled(fp);
#ifdef CONFIG_CGROUP_BPF
        aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
#endif

        INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
#ifdef CONFIG_FINEIBT
        INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
#endif
        mutex_init(&fp->aux->used_maps_mutex);
        mutex_init(&fp->aux->dst_mutex);

        return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
        struct bpf_prog *prog;
        int cpu;

        prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
        if (!prog)
                return NULL;

        prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
        if (!prog->stats) {
                free_percpu(prog->active);
                kfree(prog->aux);
                vfree(prog);
                return NULL;
        }

        for_each_possible_cpu(cpu) {
                struct bpf_prog_stats *pstats;

                pstats = per_cpu_ptr(prog->stats, cpu);
                u64_stats_init(&pstats->syncp);
        }
        return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
        if (!prog->aux->nr_linfo || !prog->jit_requested)
                return 0;

        prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
                                          sizeof(*prog->aux->jited_linfo),
                                          bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
        if (!prog->aux->jited_linfo)
                return -ENOMEM;

        return 0;
}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{
        if (prog->aux->jited_linfo &&
            (!prog->jited || !prog->aux->jited_linfo[0])) {
                kvfree(prog->aux->jited_linfo);
                prog->aux->jited_linfo = NULL;
        }

        kfree(prog->aux->kfunc_tab);
        prog->aux->kfunc_tab = NULL;
}

/* The JIT engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off. Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog. The insn off
 *      here is relative to the main prog.
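 *      (this is simply linfo[linfo_idx].insn_off, i.e. the insn_off of the
 *      first line-info entry that belongs to this prog)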
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
                               const u32 *insn_to_jit_off)
{
        u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
        const struct bpf_line_info *linfo;
        void **jited_linfo;

        if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
                /* Userspace did not provide linfo */
                return;

        linfo_idx = prog->aux->linfo_idx;
        linfo = &prog->aux->linfo[linfo_idx];
        insn_start = linfo[0].insn_off;
        insn_end = insn_start + prog->len;

        jited_linfo = &prog->aux->jited_linfo[linfo_idx];
        jited_linfo[0] = prog->bpf_func;

        nr_linfo = prog->aux->nr_linfo - linfo_idx;

        for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
                /* The verifier ensures that linfo[i].insn_off is
                 * strictly increasing
                 */
                jited_linfo[i] = prog->bpf_func +
                        insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
        struct bpf_prog *fp;
        u32 pages;

        size = round_up(size, PAGE_SIZE);
        pages = size / PAGE_SIZE;
        if (pages <= fp_old->pages)
                return fp_old;

        fp = __vmalloc(size, gfp_flags);
        if (fp) {
                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
                fp->pages = pages;
                fp->aux->prog = fp;

                /* We keep fp->aux from fp_old around in the new
                 * reallocated structure.
                 */
                fp_old->aux = NULL;
                fp_old->stats = NULL;
                fp_old->active = NULL;
                __bpf_prog_free(fp_old);
        }

        return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
        if (fp->aux) {
                mutex_destroy(&fp->aux->used_maps_mutex);
                mutex_destroy(&fp->aux->dst_mutex);
                kfree(fp->aux->poke_tab);
                kfree(fp->aux);
        }
        free_percpu(fp->stats);
        free_percpu(fp->active);
        vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
        const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
        u32 raw_size = bpf_prog_tag_scratch_size(fp);
        u32 digest[SHA1_DIGEST_WORDS];
        u32 ws[SHA1_WORKSPACE_WORDS];
        u32 i, bsize, psize, blocks;
        struct bpf_insn *dst;
        bool was_ld_map;
        u8 *raw, *todo;
        __be32 *result;
        __be64 *bits;

        raw = vmalloc(raw_size);
        if (!raw)
                return -ENOMEM;

        sha1_init(digest);
        memset(ws, 0, sizeof(ws));

        /* We need to take out the map fd for the digest calculation
         * since they are unstable from user space side.
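         * Note that the rewrite below only clears the imm in the scratch
         * copy ('raw'); the program's own insnsi are left untouched.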
         */
        dst = (void *)raw;
        for (i = 0, was_ld_map = false; i < fp->len; i++) {
                dst[i] = fp->insnsi[i];
                if (!was_ld_map &&
                    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
                    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
                     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
                        was_ld_map = true;
                        dst[i].imm = 0;
                } else if (was_ld_map &&
                           dst[i].code == 0 &&
                           dst[i].dst_reg == 0 &&
                           dst[i].src_reg == 0 &&
                           dst[i].off == 0) {
                        was_ld_map = false;
                        dst[i].imm = 0;
                } else {
                        was_ld_map = false;
                }
        }

        psize = bpf_prog_insn_size(fp);
        memset(&raw[psize], 0, raw_size - psize);
        raw[psize++] = 0x80;

        bsize = round_up(psize, SHA1_BLOCK_SIZE);
        blocks = bsize / SHA1_BLOCK_SIZE;
        todo = raw;
        if (bsize - psize >= sizeof(__be64)) {
                bits = (__be64 *)(todo + bsize - sizeof(__be64));
        } else {
                bits = (__be64 *)(todo + bsize + bits_offset);
                blocks++;
        }
        *bits = cpu_to_be64((psize - 1) << 3);

        while (blocks--) {
                sha1_transform(digest, todo, ws);
                todo += SHA1_BLOCK_SIZE;
        }

        result = (__force __be32 *)digest;
        for (i = 0; i < SHA1_DIGEST_WORDS; i++)
                result[i] = cpu_to_be32(digest[i]);
        memcpy(fp->tag, result, sizeof(fp->tag));

        vfree(raw);
        return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
                                s32 end_new, s32 curr, const bool probe_pass)
{
        const s64 imm_min = S32_MIN, imm_max = S32_MAX;
        s32 delta = end_new - end_old;
        s64 imm = insn->imm;

        if (curr < pos && curr + imm + 1 >= end_old)
                imm += delta;
        else if (curr >= end_new && curr + imm + 1 < end_new)
                imm -= delta;
        if (imm < imm_min || imm > imm_max)
                return -ERANGE;
        if (!probe_pass)
                insn->imm = imm;
        return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
                                s32 end_new, s32 curr, const bool probe_pass)
{
        s64 off_min, off_max, off;
        s32 delta = end_new - end_old;

        if (insn->code == (BPF_JMP32 | BPF_JA)) {
                off = insn->imm;
                off_min = S32_MIN;
                off_max = S32_MAX;
        } else {
                off = insn->off;
                off_min = S16_MIN;
                off_max = S16_MAX;
        }

        if (curr < pos && curr + off + 1 >= end_old)
                off += delta;
        else if (curr >= end_new && curr + off + 1 < end_new)
                off -= delta;
        if (off < off_min || off > off_max)
                return -ERANGE;
        if (!probe_pass) {
                if (insn->code == (BPF_JMP32 | BPF_JA))
                        insn->imm = off;
                else
                        insn->off = off;
        }
        return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
                            s32 end_new, const bool probe_pass)
{
        u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
        struct bpf_insn *insn = prog->insnsi;
        int ret = 0;

        for (i = 0; i < insn_cnt; i++, insn++) {
                u8 code;

                /* In the probing pass we still operate on the original,
                 * unpatched image in order to check overflows before we
                 * do any other adjustments. Therefore skip the patchlet.
                 */
                if (probe_pass && i == pos) {
                        i = end_new;
                        insn = prog->insnsi + end_old;
                }
                if (bpf_pseudo_func(insn)) {
                        ret = bpf_adj_delta_to_imm(insn, pos, end_old,
                                                   end_new, i, probe_pass);
                        if (ret)
                                return ret;
                        continue;
                }
                code = insn->code;
                if ((BPF_CLASS(code) != BPF_JMP &&
                     BPF_CLASS(code) != BPF_JMP32) ||
                    BPF_OP(code) == BPF_EXIT)
                        continue;
                /* Adjust offset of jmps if we cross patch boundaries.
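                 * Only jumps whose span crosses the patched region need
                 * fixing: their displacement changes by the size difference
                 * between the old and new patchlet; jumps that lie entirely
                 * before or entirely after the patch are left alone.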
*/ 436 if (BPF_OP(code) == BPF_CALL) { 437 if (insn->src_reg != BPF_PSEUDO_CALL) 438 continue; 439 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 440 end_new, i, probe_pass); 441 } else { 442 ret = bpf_adj_delta_to_off(insn, pos, end_old, 443 end_new, i, probe_pass); 444 } 445 if (ret) 446 break; 447 } 448 449 return ret; 450 } 451 452 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta) 453 { 454 struct bpf_line_info *linfo; 455 u32 i, nr_linfo; 456 457 nr_linfo = prog->aux->nr_linfo; 458 if (!nr_linfo || !delta) 459 return; 460 461 linfo = prog->aux->linfo; 462 463 for (i = 0; i < nr_linfo; i++) 464 if (off < linfo[i].insn_off) 465 break; 466 467 /* Push all off < linfo[i].insn_off by delta */ 468 for (; i < nr_linfo; i++) 469 linfo[i].insn_off += delta; 470 } 471 472 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 473 const struct bpf_insn *patch, u32 len) 474 { 475 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; 476 const u32 cnt_max = S16_MAX; 477 struct bpf_prog *prog_adj; 478 int err; 479 480 /* Since our patchlet doesn't expand the image, we're done. */ 481 if (insn_delta == 0) { 482 memcpy(prog->insnsi + off, patch, sizeof(*patch)); 483 return prog; 484 } 485 486 insn_adj_cnt = prog->len + insn_delta; 487 488 /* Reject anything that would potentially let the insn->off 489 * target overflow when we have excessive program expansions. 490 * We need to probe here before we do any reallocation where 491 * we afterwards may not fail anymore. 492 */ 493 if (insn_adj_cnt > cnt_max && 494 (err = bpf_adj_branches(prog, off, off + 1, off + len, true))) 495 return ERR_PTR(err); 496 497 /* Several new instructions need to be inserted. Make room 498 * for them. Likely, there's no need for a new allocation as 499 * last page could have large enough tailroom. 500 */ 501 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), 502 GFP_USER); 503 if (!prog_adj) 504 return ERR_PTR(-ENOMEM); 505 506 prog_adj->len = insn_adj_cnt; 507 508 /* Patching happens in 3 steps: 509 * 510 * 1) Move over tail of insnsi from next instruction onwards, 511 * so we can patch the single target insn with one or more 512 * new ones (patching is always from 1 to n insns, n > 0). 513 * 2) Inject new instructions at the target location. 514 * 3) Adjust branch offsets if necessary. 515 */ 516 insn_rest = insn_adj_cnt - off - len; 517 518 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, 519 sizeof(*patch) * insn_rest); 520 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); 521 522 /* We are guaranteed to not fail at this point, otherwise 523 * the ship has sailed to reverse to the original state. An 524 * overflow cannot happen at this point. 
525 */ 526 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false)); 527 528 bpf_adj_linfo(prog_adj, off, insn_delta); 529 530 return prog_adj; 531 } 532 533 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) 534 { 535 /* Branch offsets can't overflow when program is shrinking, no need 536 * to call bpf_adj_branches(..., true) here 537 */ 538 memmove(prog->insnsi + off, prog->insnsi + off + cnt, 539 sizeof(struct bpf_insn) * (prog->len - off - cnt)); 540 prog->len -= cnt; 541 542 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false)); 543 } 544 545 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) 546 { 547 int i; 548 549 for (i = 0; i < fp->aux->real_func_cnt; i++) 550 bpf_prog_kallsyms_del(fp->aux->func[i]); 551 } 552 553 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) 554 { 555 bpf_prog_kallsyms_del_subprogs(fp); 556 bpf_prog_kallsyms_del(fp); 557 } 558 559 #ifdef CONFIG_BPF_JIT 560 /* All BPF JIT sysctl knobs here. */ 561 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 562 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 563 int bpf_jit_harden __read_mostly; 564 long bpf_jit_limit __read_mostly; 565 long bpf_jit_limit_max __read_mostly; 566 567 static void 568 bpf_prog_ksym_set_addr(struct bpf_prog *prog) 569 { 570 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); 571 572 prog->aux->ksym.start = (unsigned long) prog->bpf_func; 573 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len; 574 } 575 576 static void 577 bpf_prog_ksym_set_name(struct bpf_prog *prog) 578 { 579 char *sym = prog->aux->ksym.name; 580 const char *end = sym + KSYM_NAME_LEN; 581 const struct btf_type *type; 582 const char *func_name; 583 584 BUILD_BUG_ON(sizeof("bpf_prog_") + 585 sizeof(prog->tag) * 2 + 586 /* name has been null terminated. 587 * We should need +1 for the '_' preceding 588 * the name. However, the null character 589 * is double counted between the name and the 590 * sizeof("bpf_prog_") above, so we omit 591 * the +1 here. 592 */ 593 sizeof(prog->aux->name) > KSYM_NAME_LEN); 594 595 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); 596 sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); 597 598 /* prog->aux->name will be ignored if full btf name is available */ 599 if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) { 600 type = btf_type_by_id(prog->aux->btf, 601 prog->aux->func_info[prog->aux->func_idx].type_id); 602 func_name = btf_name_by_offset(prog->aux->btf, type->name_off); 603 snprintf(sym, (size_t)(end - sym), "_%s", func_name); 604 return; 605 } 606 607 if (prog->aux->name[0]) 608 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); 609 else 610 *sym = 0; 611 } 612 613 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n) 614 { 615 return container_of(n, struct bpf_ksym, tnode)->start; 616 } 617 618 static __always_inline bool bpf_tree_less(struct latch_tree_node *a, 619 struct latch_tree_node *b) 620 { 621 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b); 622 } 623 624 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) 625 { 626 unsigned long val = (unsigned long)key; 627 const struct bpf_ksym *ksym; 628 629 ksym = container_of(n, struct bpf_ksym, tnode); 630 631 if (val < ksym->start) 632 return -1; 633 /* Ensure that we detect return addresses as part of the program, when 634 * the final instruction is a call for a program part of the stack 635 * trace. 
Therefore, do val > ksym->end instead of val >= ksym->end. 636 */ 637 if (val > ksym->end) 638 return 1; 639 640 return 0; 641 } 642 643 static const struct latch_tree_ops bpf_tree_ops = { 644 .less = bpf_tree_less, 645 .comp = bpf_tree_comp, 646 }; 647 648 static DEFINE_SPINLOCK(bpf_lock); 649 static LIST_HEAD(bpf_kallsyms); 650 static struct latch_tree_root bpf_tree __cacheline_aligned; 651 652 void bpf_ksym_add(struct bpf_ksym *ksym) 653 { 654 spin_lock_bh(&bpf_lock); 655 WARN_ON_ONCE(!list_empty(&ksym->lnode)); 656 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms); 657 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 658 spin_unlock_bh(&bpf_lock); 659 } 660 661 static void __bpf_ksym_del(struct bpf_ksym *ksym) 662 { 663 if (list_empty(&ksym->lnode)) 664 return; 665 666 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 667 list_del_rcu(&ksym->lnode); 668 } 669 670 void bpf_ksym_del(struct bpf_ksym *ksym) 671 { 672 spin_lock_bh(&bpf_lock); 673 __bpf_ksym_del(ksym); 674 spin_unlock_bh(&bpf_lock); 675 } 676 677 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) 678 { 679 return fp->jited && !bpf_prog_was_classic(fp); 680 } 681 682 void bpf_prog_kallsyms_add(struct bpf_prog *fp) 683 { 684 if (!bpf_prog_kallsyms_candidate(fp) || 685 !bpf_token_capable(fp->aux->token, CAP_BPF)) 686 return; 687 688 bpf_prog_ksym_set_addr(fp); 689 bpf_prog_ksym_set_name(fp); 690 fp->aux->ksym.prog = true; 691 692 bpf_ksym_add(&fp->aux->ksym); 693 694 #ifdef CONFIG_FINEIBT 695 /* 696 * When FineIBT, code in the __cfi_foo() symbols can get executed 697 * and hence unwinder needs help. 698 */ 699 if (cfi_mode != CFI_FINEIBT) 700 return; 701 702 snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN, 703 "__cfi_%s", fp->aux->ksym.name); 704 705 fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16; 706 fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func; 707 708 bpf_ksym_add(&fp->aux->ksym_prefix); 709 #endif 710 } 711 712 void bpf_prog_kallsyms_del(struct bpf_prog *fp) 713 { 714 if (!bpf_prog_kallsyms_candidate(fp)) 715 return; 716 717 bpf_ksym_del(&fp->aux->ksym); 718 #ifdef CONFIG_FINEIBT 719 if (cfi_mode != CFI_FINEIBT) 720 return; 721 bpf_ksym_del(&fp->aux->ksym_prefix); 722 #endif 723 } 724 725 static struct bpf_ksym *bpf_ksym_find(unsigned long addr) 726 { 727 struct latch_tree_node *n; 728 729 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); 730 return n ? container_of(n, struct bpf_ksym, tnode) : NULL; 731 } 732 733 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, 734 unsigned long *off, char *sym) 735 { 736 struct bpf_ksym *ksym; 737 char *ret = NULL; 738 739 rcu_read_lock(); 740 ksym = bpf_ksym_find(addr); 741 if (ksym) { 742 unsigned long symbol_start = ksym->start; 743 unsigned long symbol_end = ksym->end; 744 745 strncpy(sym, ksym->name, KSYM_NAME_LEN); 746 747 ret = sym; 748 if (size) 749 *size = symbol_end - symbol_start; 750 if (off) 751 *off = addr - symbol_start; 752 } 753 rcu_read_unlock(); 754 755 return ret; 756 } 757 758 bool is_bpf_text_address(unsigned long addr) 759 { 760 bool ret; 761 762 rcu_read_lock(); 763 ret = bpf_ksym_find(addr) != NULL; 764 rcu_read_unlock(); 765 766 return ret; 767 } 768 769 struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) 770 { 771 struct bpf_ksym *ksym = bpf_ksym_find(addr); 772 773 return ksym && ksym->prog ? 
               container_of(ksym, struct bpf_prog_aux, ksym)->prog :
               NULL;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
        const struct exception_table_entry *e = NULL;
        struct bpf_prog *prog;

        rcu_read_lock();
        prog = bpf_prog_ksym_find(addr);
        if (!prog)
                goto out;
        if (!prog->aux->num_exentries)
                goto out;

        e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
        rcu_read_unlock();
        return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                    char *sym)
{
        struct bpf_ksym *ksym;
        unsigned int it = 0;
        int ret = -ERANGE;

        if (!bpf_jit_kallsyms_enabled())
                return ret;

        rcu_read_lock();
        list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
                if (it++ != symnum)
                        continue;

                strncpy(sym, ksym->name, KSYM_NAME_LEN);

                *value = ksym->start;
                *type = BPF_SYM_ELF_TYPE;

                ret = 0;
                break;
        }
        rcu_read_unlock();

        return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
                                struct bpf_jit_poke_descriptor *poke)
{
        struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
        static const u32 poke_tab_max = 1024;
        u32 slot = prog->aux->size_poke_tab;
        u32 size = slot + 1;

        if (size > poke_tab_max)
                return -ENOSPC;
        if (poke->tailcall_target || poke->tailcall_target_stable ||
            poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
                return -EINVAL;

        switch (poke->reason) {
        case BPF_POKE_REASON_TAIL_CALL:
                if (!poke->tail_call.map)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
        if (!tab)
                return -ENOMEM;

        memcpy(&tab[slot], poke, sizeof(*poke));
        prog->aux->size_poke_tab = size;
        prog->aux->poke_tab = tab;

        return slot;
}

/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small BPF programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
 * on x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT 6
#define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))

struct bpf_prog_pack {
        struct list_head list;
        void *ptr;
        unsigned long bitmap[];
};

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{
        memset(area, 0, size);
}

#define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)

static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
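 * With PMD_SIZE available, each pack spans PMD_SIZE * num_possible_nodes()
 * bytes and is carved into BPF_PROG_CHUNK_SIZE (64-byte) chunks tracked by
 * the per-pack bitmap below.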
 */
#ifdef PMD_SIZE
#define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE PAGE_SIZE
#endif

#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)

static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        struct bpf_prog_pack *pack;

        pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
                       GFP_KERNEL);
        if (!pack)
                return NULL;
        pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
        if (!pack->ptr) {
                kfree(pack);
                return NULL;
        }
        bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
        bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
        list_add_tail(&pack->list, &pack_list);

        set_vm_flush_reset_perms(pack->ptr);
        set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
        return pack;
}

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
        struct bpf_prog_pack *pack;
        unsigned long pos;
        void *ptr = NULL;

        mutex_lock(&pack_mutex);
        if (size > BPF_PROG_PACK_SIZE) {
                size = round_up(size, PAGE_SIZE);
                ptr = bpf_jit_alloc_exec(size);
                if (ptr) {
                        bpf_fill_ill_insns(ptr, size);
                        set_vm_flush_reset_perms(ptr);
                        set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
                }
                goto out;
        }
        list_for_each_entry(pack, &pack_list, list) {
                pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
                                                 nbits, 0);
                if (pos < BPF_PROG_CHUNK_COUNT)
                        goto found_free_area;
        }

        pack = alloc_new_pack(bpf_fill_ill_insns);
        if (!pack)
                goto out;

        pos = 0;

found_free_area:
        bitmap_set(pack->bitmap, pos, nbits);
        ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);

out:
        mutex_unlock(&pack_mutex);
        return ptr;
}

void bpf_prog_pack_free(void *ptr, u32 size)
{
        struct bpf_prog_pack *pack = NULL, *tmp;
        unsigned int nbits;
        unsigned long pos;

        mutex_lock(&pack_mutex);
        if (size > BPF_PROG_PACK_SIZE) {
                bpf_jit_free_exec(ptr);
                goto out;
        }

        list_for_each_entry(tmp, &pack_list, list) {
                if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
                        pack = tmp;
                        break;
                }
        }

        if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
                goto out;

        nbits = BPF_PROG_SIZE_TO_NBITS(size);
        pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;

        WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
                  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");

        bitmap_clear(pack->bitmap, pos, nbits);
        if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
                                       BPF_PROG_CHUNK_COUNT, 0) == 0) {
                list_del(&pack->list);
                bpf_jit_free_exec(pack->ptr);
                kfree(pack);
        }
out:
        mutex_unlock(&pack_mutex);
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
        return MODULES_END - MODULES_VADDR;
#else
        return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
        /* Only used as heuristic here to derive limit.
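         * The resulting default is half of the arch's JIT address space,
         * rounded up to a page and clamped to LONG_MAX.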
         */
        bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
        bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
                                            PAGE_SIZE), LONG_MAX);
        return 0;
}
pure_initcall(bpf_jit_charge_init);

int bpf_jit_charge_modmem(u32 size)
{
        if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
                if (!bpf_capable()) {
                        atomic_long_sub(size, &bpf_jit_current);
                        return -EPERM;
                }
        }

        return 0;
}

void bpf_jit_uncharge_modmem(u32 size)
{
        atomic_long_sub(size, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
        return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
        module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
                     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        struct bpf_binary_header *hdr;
        u32 size, hole, start;

        WARN_ON_ONCE(!is_power_of_2(alignment) ||
                     alignment > BPF_IMAGE_ALIGNMENT);

        /* Most of BPF filters are really small, but if some of them
         * fill a page, allow at least 128 extra bytes to insert a
         * random section of illegal instructions.
         */
        size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);

        if (bpf_jit_charge_modmem(size))
                return NULL;
        hdr = bpf_jit_alloc_exec(size);
        if (!hdr) {
                bpf_jit_uncharge_modmem(size);
                return NULL;
        }

        /* Fill space with illegal/arch-dep instructions. */
        bpf_fill_ill_insns(hdr, size);

        hdr->size = size;
        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
                     PAGE_SIZE - sizeof(*hdr));
        start = get_random_u32_below(hole) & ~(alignment - 1);

        /* Leave a random number of instructions before BPF code. */
        *image_ptr = &hdr->image[start];

        return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
        u32 size = hdr->size;

        bpf_jit_free_exec(hdr);
        bpf_jit_uncharge_modmem(size);
}

/* Allocate jit binary from bpf_prog_pack allocator.
 * Since the allocated memory is RO+X, the JIT engine cannot write directly
 * to the memory. To solve this problem, a RW buffer is also allocated at
 * the same time. The JIT engine should calculate offsets based on the
 * RO memory address, but write JITed program to the RW buffer. Once the
 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
 * the JITed program to the RO memory.
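 *
 * For illustration only, a typical JIT backend therefore does roughly the
 * following (local variable names and the fill callback are hypothetical):
 *
 *      header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *                                         &rw_header, &rw_image,
 *                                         jit_fill_hole);
 *      ... emit instructions into rw_image ...
 *      if (bpf_jit_binary_pack_finalize(prog, header, rw_header))
 *              goto out_err;   (RO chunk and RW buffer are already freed)
 *      prog->bpf_func = (void *)image;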
 */
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
                          unsigned int alignment,
                          struct bpf_binary_header **rw_header,
                          u8 **rw_image,
                          bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        struct bpf_binary_header *ro_header;
        u32 size, hole, start;

        WARN_ON_ONCE(!is_power_of_2(alignment) ||
                     alignment > BPF_IMAGE_ALIGNMENT);

        /* add 16 bytes for a random section of illegal instructions */
        size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);

        if (bpf_jit_charge_modmem(size))
                return NULL;
        ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
        if (!ro_header) {
                bpf_jit_uncharge_modmem(size);
                return NULL;
        }

        *rw_header = kvmalloc(size, GFP_KERNEL);
        if (!*rw_header) {
                bpf_prog_pack_free(ro_header, size);
                bpf_jit_uncharge_modmem(size);
                return NULL;
        }

        /* Fill space with illegal/arch-dep instructions. */
        bpf_fill_ill_insns(*rw_header, size);
        (*rw_header)->size = size;

        hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
                     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
        start = get_random_u32_below(hole) & ~(alignment - 1);

        *image_ptr = &ro_header->image[start];
        *rw_image = &(*rw_header)->image[start];

        return ro_header;
}

/* Copy JITed text from rw_header to its final location, the ro_header. */
int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
                                 struct bpf_binary_header *ro_header,
                                 struct bpf_binary_header *rw_header)
{
        void *ptr;

        ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);

        kvfree(rw_header);

        if (IS_ERR(ptr)) {
                bpf_prog_pack_free(ro_header, ro_header->size);
                return PTR_ERR(ptr);
        }
        return 0;
}

/* bpf_jit_binary_pack_free is called in two different scenarios:
 * 1) when the program is freed after a successful JIT;
 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
 * For case 2), we need to free both the RO memory and the RW buffer.
 *
 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
 * bpf_arch_text_copy (when jit fails).
 */
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
                              struct bpf_binary_header *rw_header)
{
        u32 size = ro_header->size;

        bpf_prog_pack_free(ro_header, size);
        kvfree(rw_header);
        bpf_jit_uncharge_modmem(size);
}

struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
{
        unsigned long real_start = (unsigned long)fp->bpf_func;
        unsigned long addr;

        addr = real_start & BPF_PROG_CHUNK_MASK;
        return (void *)addr;
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
        unsigned long real_start = (unsigned long)fp->bpf_func;
        unsigned long addr;

        addr = real_start & PAGE_MASK;
        return (void *)addr;
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
        if (fp->jited) {
                struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

                bpf_jit_binary_free(hdr);
                WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
        }

        bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
                          const struct bpf_insn *insn, bool extra_pass,
                          u64 *func_addr, bool *func_addr_fixed)
{
        s16 off = insn->off;
        s32 imm = insn->imm;
        u8 *addr;
        int err;

        *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
        if (!*func_addr_fixed) {
                /* Place-holder address till the last pass has collected
                 * all addresses for JITed subprograms in which case we
                 * can pick them up from prog->aux.
                 */
                if (!extra_pass)
                        addr = NULL;
                else if (prog->aux->func &&
                         off >= 0 && off < prog->aux->real_func_cnt)
                        addr = (u8 *)prog->aux->func[off]->bpf_func;
                else
                        return -EINVAL;
        } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
                   bpf_jit_supports_far_kfunc_call()) {
                err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
                if (err)
                        return err;
        } else {
                /* Address of a BPF helper call. Since part of the core
                 * kernel, it's always at a fixed location. __bpf_call_base
                 * and the helper with imm relative to it are both in core
                 * kernel.
                 */
                addr = (u8 *)__bpf_call_base + imm;
        }

        *func_addr = (unsigned long)addr;
        return 0;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
                              const struct bpf_insn *aux,
                              struct bpf_insn *to_buff,
                              bool emit_zext)
{
        struct bpf_insn *to = to_buff;
        u32 imm_rnd = get_random_u32();
        s16 off;

        BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
        BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

        /* Constraints on AX register:
         *
         * AX register is inaccessible from user space. It is mapped in
         * all JITs, and used here for constant blinding rewrites. It is
         * typically "stateless" meaning its contents are only valid within
         * the executed instruction, but not across several instructions.
         * There are a few exceptions however which are further detailed
         * below.
         *
         * Constant blinding is only used by JITs, not in the interpreter.
         * The interpreter uses AX in some occasions as a local temporary
         * register e.g. in DIV or MOD instructions.
         *
         * In restricted circumstances, the verifier can also use the AX
         * register for rewrites as long as they do not interfere with
         * the above cases!
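         *
         * As an illustration of the rewrite performed below, an instruction
         * like "r2 += 0x1234" is blinded into (rnd being a fresh random
         * value):
         *
         *      AX  = 0x1234 ^ rnd
         *      AX ^= rnd
         *      r2 += AX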
1293 */ 1294 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) 1295 goto out; 1296 1297 if (from->imm == 0 && 1298 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || 1299 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { 1300 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); 1301 goto out; 1302 } 1303 1304 switch (from->code) { 1305 case BPF_ALU | BPF_ADD | BPF_K: 1306 case BPF_ALU | BPF_SUB | BPF_K: 1307 case BPF_ALU | BPF_AND | BPF_K: 1308 case BPF_ALU | BPF_OR | BPF_K: 1309 case BPF_ALU | BPF_XOR | BPF_K: 1310 case BPF_ALU | BPF_MUL | BPF_K: 1311 case BPF_ALU | BPF_MOV | BPF_K: 1312 case BPF_ALU | BPF_DIV | BPF_K: 1313 case BPF_ALU | BPF_MOD | BPF_K: 1314 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1315 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1316 *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off); 1317 break; 1318 1319 case BPF_ALU64 | BPF_ADD | BPF_K: 1320 case BPF_ALU64 | BPF_SUB | BPF_K: 1321 case BPF_ALU64 | BPF_AND | BPF_K: 1322 case BPF_ALU64 | BPF_OR | BPF_K: 1323 case BPF_ALU64 | BPF_XOR | BPF_K: 1324 case BPF_ALU64 | BPF_MUL | BPF_K: 1325 case BPF_ALU64 | BPF_MOV | BPF_K: 1326 case BPF_ALU64 | BPF_DIV | BPF_K: 1327 case BPF_ALU64 | BPF_MOD | BPF_K: 1328 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1329 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1330 *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off); 1331 break; 1332 1333 case BPF_JMP | BPF_JEQ | BPF_K: 1334 case BPF_JMP | BPF_JNE | BPF_K: 1335 case BPF_JMP | BPF_JGT | BPF_K: 1336 case BPF_JMP | BPF_JLT | BPF_K: 1337 case BPF_JMP | BPF_JGE | BPF_K: 1338 case BPF_JMP | BPF_JLE | BPF_K: 1339 case BPF_JMP | BPF_JSGT | BPF_K: 1340 case BPF_JMP | BPF_JSLT | BPF_K: 1341 case BPF_JMP | BPF_JSGE | BPF_K: 1342 case BPF_JMP | BPF_JSLE | BPF_K: 1343 case BPF_JMP | BPF_JSET | BPF_K: 1344 /* Accommodate for extra offset in case of a backjump. */ 1345 off = from->off; 1346 if (off < 0) 1347 off -= 2; 1348 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1349 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1350 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); 1351 break; 1352 1353 case BPF_JMP32 | BPF_JEQ | BPF_K: 1354 case BPF_JMP32 | BPF_JNE | BPF_K: 1355 case BPF_JMP32 | BPF_JGT | BPF_K: 1356 case BPF_JMP32 | BPF_JLT | BPF_K: 1357 case BPF_JMP32 | BPF_JGE | BPF_K: 1358 case BPF_JMP32 | BPF_JLE | BPF_K: 1359 case BPF_JMP32 | BPF_JSGT | BPF_K: 1360 case BPF_JMP32 | BPF_JSLT | BPF_K: 1361 case BPF_JMP32 | BPF_JSGE | BPF_K: 1362 case BPF_JMP32 | BPF_JSLE | BPF_K: 1363 case BPF_JMP32 | BPF_JSET | BPF_K: 1364 /* Accommodate for extra offset in case of a backjump. */ 1365 off = from->off; 1366 if (off < 0) 1367 off -= 2; 1368 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1369 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1370 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, 1371 off); 1372 break; 1373 1374 case BPF_LD | BPF_IMM | BPF_DW: 1375 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); 1376 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1377 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 1378 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX); 1379 break; 1380 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. 
*/ 1381 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1382 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1383 if (emit_zext) 1384 *to++ = BPF_ZEXT_REG(BPF_REG_AX); 1385 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1386 break; 1387 1388 case BPF_ST | BPF_MEM | BPF_DW: 1389 case BPF_ST | BPF_MEM | BPF_W: 1390 case BPF_ST | BPF_MEM | BPF_H: 1391 case BPF_ST | BPF_MEM | BPF_B: 1392 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1393 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1394 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); 1395 break; 1396 } 1397 out: 1398 return to - to_buff; 1399 } 1400 1401 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, 1402 gfp_t gfp_extra_flags) 1403 { 1404 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 1405 struct bpf_prog *fp; 1406 1407 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); 1408 if (fp != NULL) { 1409 /* aux->prog still points to the fp_other one, so 1410 * when promoting the clone to the real program, 1411 * this still needs to be adapted. 1412 */ 1413 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); 1414 } 1415 1416 return fp; 1417 } 1418 1419 static void bpf_prog_clone_free(struct bpf_prog *fp) 1420 { 1421 /* aux was stolen by the other clone, so we cannot free 1422 * it from this path! It will be freed eventually by the 1423 * other program on release. 1424 * 1425 * At this point, we don't need a deferred release since 1426 * clone is guaranteed to not be locked. 1427 */ 1428 fp->aux = NULL; 1429 fp->stats = NULL; 1430 fp->active = NULL; 1431 __bpf_prog_free(fp); 1432 } 1433 1434 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) 1435 { 1436 /* We have to repoint aux->prog to self, as we don't 1437 * know whether fp here is the clone or the original. 1438 */ 1439 fp->aux->prog = fp; 1440 bpf_prog_clone_free(fp_other); 1441 } 1442 1443 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) 1444 { 1445 struct bpf_insn insn_buff[16], aux[2]; 1446 struct bpf_prog *clone, *tmp; 1447 int insn_delta, insn_cnt; 1448 struct bpf_insn *insn; 1449 int i, rewritten; 1450 1451 if (!prog->blinding_requested || prog->blinded) 1452 return prog; 1453 1454 clone = bpf_prog_clone_create(prog, GFP_USER); 1455 if (!clone) 1456 return ERR_PTR(-ENOMEM); 1457 1458 insn_cnt = clone->len; 1459 insn = clone->insnsi; 1460 1461 for (i = 0; i < insn_cnt; i++, insn++) { 1462 if (bpf_pseudo_func(insn)) { 1463 /* ld_imm64 with an address of bpf subprog is not 1464 * a user controlled constant. Don't randomize it, 1465 * since it will conflict with jit_subprogs() logic. 1466 */ 1467 insn++; 1468 i++; 1469 continue; 1470 } 1471 1472 /* We temporarily need to hold the original ld64 insn 1473 * so that we can still access the first part in the 1474 * second blinding run. 1475 */ 1476 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) && 1477 insn[1].code == 0) 1478 memcpy(aux, insn, sizeof(aux)); 1479 1480 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, 1481 clone->aux->verifier_zext); 1482 if (!rewritten) 1483 continue; 1484 1485 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); 1486 if (IS_ERR(tmp)) { 1487 /* Patching may have repointed aux->prog during 1488 * realloc from the original one, so we need to 1489 * fix it up here on error. 
1490 */ 1491 bpf_jit_prog_release_other(prog, clone); 1492 return tmp; 1493 } 1494 1495 clone = tmp; 1496 insn_delta = rewritten - 1; 1497 1498 /* Walk new program and skip insns we just inserted. */ 1499 insn = clone->insnsi + i + insn_delta; 1500 insn_cnt += insn_delta; 1501 i += insn_delta; 1502 } 1503 1504 clone->blinded = 1; 1505 return clone; 1506 } 1507 #endif /* CONFIG_BPF_JIT */ 1508 1509 /* Base function for offset calculation. Needs to go into .text section, 1510 * therefore keeping it non-static as well; will also be used by JITs 1511 * anyway later on, so do not let the compiler omit it. This also needs 1512 * to go into kallsyms for correlation from e.g. bpftool, so naming 1513 * must not change. 1514 */ 1515 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 1516 { 1517 return 0; 1518 } 1519 EXPORT_SYMBOL_GPL(__bpf_call_base); 1520 1521 /* All UAPI available opcodes. */ 1522 #define BPF_INSN_MAP(INSN_2, INSN_3) \ 1523 /* 32 bit ALU operations. */ \ 1524 /* Register based. */ \ 1525 INSN_3(ALU, ADD, X), \ 1526 INSN_3(ALU, SUB, X), \ 1527 INSN_3(ALU, AND, X), \ 1528 INSN_3(ALU, OR, X), \ 1529 INSN_3(ALU, LSH, X), \ 1530 INSN_3(ALU, RSH, X), \ 1531 INSN_3(ALU, XOR, X), \ 1532 INSN_3(ALU, MUL, X), \ 1533 INSN_3(ALU, MOV, X), \ 1534 INSN_3(ALU, ARSH, X), \ 1535 INSN_3(ALU, DIV, X), \ 1536 INSN_3(ALU, MOD, X), \ 1537 INSN_2(ALU, NEG), \ 1538 INSN_3(ALU, END, TO_BE), \ 1539 INSN_3(ALU, END, TO_LE), \ 1540 /* Immediate based. */ \ 1541 INSN_3(ALU, ADD, K), \ 1542 INSN_3(ALU, SUB, K), \ 1543 INSN_3(ALU, AND, K), \ 1544 INSN_3(ALU, OR, K), \ 1545 INSN_3(ALU, LSH, K), \ 1546 INSN_3(ALU, RSH, K), \ 1547 INSN_3(ALU, XOR, K), \ 1548 INSN_3(ALU, MUL, K), \ 1549 INSN_3(ALU, MOV, K), \ 1550 INSN_3(ALU, ARSH, K), \ 1551 INSN_3(ALU, DIV, K), \ 1552 INSN_3(ALU, MOD, K), \ 1553 /* 64 bit ALU operations. */ \ 1554 /* Register based. */ \ 1555 INSN_3(ALU64, ADD, X), \ 1556 INSN_3(ALU64, SUB, X), \ 1557 INSN_3(ALU64, AND, X), \ 1558 INSN_3(ALU64, OR, X), \ 1559 INSN_3(ALU64, LSH, X), \ 1560 INSN_3(ALU64, RSH, X), \ 1561 INSN_3(ALU64, XOR, X), \ 1562 INSN_3(ALU64, MUL, X), \ 1563 INSN_3(ALU64, MOV, X), \ 1564 INSN_3(ALU64, ARSH, X), \ 1565 INSN_3(ALU64, DIV, X), \ 1566 INSN_3(ALU64, MOD, X), \ 1567 INSN_2(ALU64, NEG), \ 1568 INSN_3(ALU64, END, TO_LE), \ 1569 /* Immediate based. */ \ 1570 INSN_3(ALU64, ADD, K), \ 1571 INSN_3(ALU64, SUB, K), \ 1572 INSN_3(ALU64, AND, K), \ 1573 INSN_3(ALU64, OR, K), \ 1574 INSN_3(ALU64, LSH, K), \ 1575 INSN_3(ALU64, RSH, K), \ 1576 INSN_3(ALU64, XOR, K), \ 1577 INSN_3(ALU64, MUL, K), \ 1578 INSN_3(ALU64, MOV, K), \ 1579 INSN_3(ALU64, ARSH, K), \ 1580 INSN_3(ALU64, DIV, K), \ 1581 INSN_3(ALU64, MOD, K), \ 1582 /* Call instruction. */ \ 1583 INSN_2(JMP, CALL), \ 1584 /* Exit instruction. */ \ 1585 INSN_2(JMP, EXIT), \ 1586 /* 32-bit Jump instructions. */ \ 1587 /* Register based. */ \ 1588 INSN_3(JMP32, JEQ, X), \ 1589 INSN_3(JMP32, JNE, X), \ 1590 INSN_3(JMP32, JGT, X), \ 1591 INSN_3(JMP32, JLT, X), \ 1592 INSN_3(JMP32, JGE, X), \ 1593 INSN_3(JMP32, JLE, X), \ 1594 INSN_3(JMP32, JSGT, X), \ 1595 INSN_3(JMP32, JSLT, X), \ 1596 INSN_3(JMP32, JSGE, X), \ 1597 INSN_3(JMP32, JSLE, X), \ 1598 INSN_3(JMP32, JSET, X), \ 1599 /* Immediate based. 
*/ \ 1600 INSN_3(JMP32, JEQ, K), \ 1601 INSN_3(JMP32, JNE, K), \ 1602 INSN_3(JMP32, JGT, K), \ 1603 INSN_3(JMP32, JLT, K), \ 1604 INSN_3(JMP32, JGE, K), \ 1605 INSN_3(JMP32, JLE, K), \ 1606 INSN_3(JMP32, JSGT, K), \ 1607 INSN_3(JMP32, JSLT, K), \ 1608 INSN_3(JMP32, JSGE, K), \ 1609 INSN_3(JMP32, JSLE, K), \ 1610 INSN_3(JMP32, JSET, K), \ 1611 /* Jump instructions. */ \ 1612 /* Register based. */ \ 1613 INSN_3(JMP, JEQ, X), \ 1614 INSN_3(JMP, JNE, X), \ 1615 INSN_3(JMP, JGT, X), \ 1616 INSN_3(JMP, JLT, X), \ 1617 INSN_3(JMP, JGE, X), \ 1618 INSN_3(JMP, JLE, X), \ 1619 INSN_3(JMP, JSGT, X), \ 1620 INSN_3(JMP, JSLT, X), \ 1621 INSN_3(JMP, JSGE, X), \ 1622 INSN_3(JMP, JSLE, X), \ 1623 INSN_3(JMP, JSET, X), \ 1624 /* Immediate based. */ \ 1625 INSN_3(JMP, JEQ, K), \ 1626 INSN_3(JMP, JNE, K), \ 1627 INSN_3(JMP, JGT, K), \ 1628 INSN_3(JMP, JLT, K), \ 1629 INSN_3(JMP, JGE, K), \ 1630 INSN_3(JMP, JLE, K), \ 1631 INSN_3(JMP, JSGT, K), \ 1632 INSN_3(JMP, JSLT, K), \ 1633 INSN_3(JMP, JSGE, K), \ 1634 INSN_3(JMP, JSLE, K), \ 1635 INSN_3(JMP, JSET, K), \ 1636 INSN_2(JMP, JA), \ 1637 INSN_2(JMP32, JA), \ 1638 /* Store instructions. */ \ 1639 /* Register based. */ \ 1640 INSN_3(STX, MEM, B), \ 1641 INSN_3(STX, MEM, H), \ 1642 INSN_3(STX, MEM, W), \ 1643 INSN_3(STX, MEM, DW), \ 1644 INSN_3(STX, ATOMIC, W), \ 1645 INSN_3(STX, ATOMIC, DW), \ 1646 /* Immediate based. */ \ 1647 INSN_3(ST, MEM, B), \ 1648 INSN_3(ST, MEM, H), \ 1649 INSN_3(ST, MEM, W), \ 1650 INSN_3(ST, MEM, DW), \ 1651 /* Load instructions. */ \ 1652 /* Register based. */ \ 1653 INSN_3(LDX, MEM, B), \ 1654 INSN_3(LDX, MEM, H), \ 1655 INSN_3(LDX, MEM, W), \ 1656 INSN_3(LDX, MEM, DW), \ 1657 INSN_3(LDX, MEMSX, B), \ 1658 INSN_3(LDX, MEMSX, H), \ 1659 INSN_3(LDX, MEMSX, W), \ 1660 /* Immediate based. */ \ 1661 INSN_3(LD, IMM, DW) 1662 1663 bool bpf_opcode_in_insntable(u8 code) 1664 { 1665 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true 1666 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true 1667 static const bool public_insntable[256] = { 1668 [0 ... 255] = false, 1669 /* Now overwrite non-defaults ... */ 1670 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL), 1671 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */ 1672 [BPF_LD | BPF_ABS | BPF_B] = true, 1673 [BPF_LD | BPF_ABS | BPF_H] = true, 1674 [BPF_LD | BPF_ABS | BPF_W] = true, 1675 [BPF_LD | BPF_IND | BPF_B] = true, 1676 [BPF_LD | BPF_IND | BPF_H] = true, 1677 [BPF_LD | BPF_IND | BPF_W] = true, 1678 }; 1679 #undef BPF_INSN_3_TBL 1680 #undef BPF_INSN_2_TBL 1681 return public_insntable[code]; 1682 } 1683 1684 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1685 /** 1686 * ___bpf_prog_run - run eBPF program on a given context 1687 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers 1688 * @insn: is the array of eBPF instructions 1689 * 1690 * Decode and execute eBPF instructions. 1691 * 1692 * Return: whatever value is in %BPF_R0 at program exit 1693 */ 1694 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn) 1695 { 1696 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y 1697 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z 1698 static const void * const jumptable[256] __annotate_jump_table = { 1699 [0 ... 255] = &&default_label, 1700 /* Now overwrite non-defaults ... */ 1701 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL), 1702 /* Non-UAPI available opcodes. 
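                 * These entries are only ever installed by the verifier or
                 * the JIT infrastructure; programs loaded from user space
                 * cannot contain them (see bpf_opcode_in_insntable()). */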
*/ 1703 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, 1704 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, 1705 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC, 1706 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B, 1707 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H, 1708 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W, 1709 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW, 1710 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B, 1711 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H, 1712 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W, 1713 }; 1714 #undef BPF_INSN_3_LBL 1715 #undef BPF_INSN_2_LBL 1716 u32 tail_call_cnt = 0; 1717 1718 #define CONT ({ insn++; goto select_insn; }) 1719 #define CONT_JMP ({ insn++; goto select_insn; }) 1720 1721 select_insn: 1722 goto *jumptable[insn->code]; 1723 1724 /* Explicitly mask the register-based shift amounts with 63 or 31 1725 * to avoid undefined behavior. Normally this won't affect the 1726 * generated code, for example, in case of native 64 bit archs such 1727 * as x86-64 or arm64, the compiler is optimizing the AND away for 1728 * the interpreter. In case of JITs, each of the JIT backends compiles 1729 * the BPF shift operations to machine instructions which produce 1730 * implementation-defined results in such a case; the resulting 1731 * contents of the register may be arbitrary, but program behaviour 1732 * as a whole remains defined. In other words, in case of JIT backends, 1733 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation. 1734 */ 1735 /* ALU (shifts) */ 1736 #define SHT(OPCODE, OP) \ 1737 ALU64_##OPCODE##_X: \ 1738 DST = DST OP (SRC & 63); \ 1739 CONT; \ 1740 ALU_##OPCODE##_X: \ 1741 DST = (u32) DST OP ((u32) SRC & 31); \ 1742 CONT; \ 1743 ALU64_##OPCODE##_K: \ 1744 DST = DST OP IMM; \ 1745 CONT; \ 1746 ALU_##OPCODE##_K: \ 1747 DST = (u32) DST OP (u32) IMM; \ 1748 CONT; 1749 /* ALU (rest) */ 1750 #define ALU(OPCODE, OP) \ 1751 ALU64_##OPCODE##_X: \ 1752 DST = DST OP SRC; \ 1753 CONT; \ 1754 ALU_##OPCODE##_X: \ 1755 DST = (u32) DST OP (u32) SRC; \ 1756 CONT; \ 1757 ALU64_##OPCODE##_K: \ 1758 DST = DST OP IMM; \ 1759 CONT; \ 1760 ALU_##OPCODE##_K: \ 1761 DST = (u32) DST OP (u32) IMM; \ 1762 CONT; 1763 ALU(ADD, +) 1764 ALU(SUB, -) 1765 ALU(AND, &) 1766 ALU(OR, |) 1767 ALU(XOR, ^) 1768 ALU(MUL, *) 1769 SHT(LSH, <<) 1770 SHT(RSH, >>) 1771 #undef SHT 1772 #undef ALU 1773 ALU_NEG: 1774 DST = (u32) -DST; 1775 CONT; 1776 ALU64_NEG: 1777 DST = -DST; 1778 CONT; 1779 ALU_MOV_X: 1780 switch (OFF) { 1781 case 0: 1782 DST = (u32) SRC; 1783 break; 1784 case 8: 1785 DST = (u32)(s8) SRC; 1786 break; 1787 case 16: 1788 DST = (u32)(s16) SRC; 1789 break; 1790 } 1791 CONT; 1792 ALU_MOV_K: 1793 DST = (u32) IMM; 1794 CONT; 1795 ALU64_MOV_X: 1796 switch (OFF) { 1797 case 0: 1798 DST = SRC; 1799 break; 1800 case 8: 1801 DST = (s8) SRC; 1802 break; 1803 case 16: 1804 DST = (s16) SRC; 1805 break; 1806 case 32: 1807 DST = (s32) SRC; 1808 break; 1809 } 1810 CONT; 1811 ALU64_MOV_K: 1812 DST = IMM; 1813 CONT; 1814 LD_IMM_DW: 1815 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; 1816 insn++; 1817 CONT; 1818 ALU_ARSH_X: 1819 DST = (u64) (u32) (((s32) DST) >> (SRC & 31)); 1820 CONT; 1821 ALU_ARSH_K: 1822 DST = (u64) (u32) (((s32) DST) >> IMM); 1823 CONT; 1824 ALU64_ARSH_X: 1825 (*(s64 *) &DST) >>= (SRC & 63); 1826 CONT; 1827 ALU64_ARSH_K: 1828 (*(s64 *) &DST) >>= IMM; 1829 CONT; 1830 ALU64_MOD_X: 1831 switch (OFF) { 1832 case 0: 1833 div64_u64_rem(DST, SRC, &AX); 1834 DST = AX; 1835 break; 1836 
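                /* off == 1: signed variant (the remainder takes the sign of
                 * the dividend, as div64_s64() truncates toward zero).
                 */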
case 1: 1837 AX = div64_s64(DST, SRC); 1838 DST = DST - AX * SRC; 1839 break; 1840 } 1841 CONT; 1842 ALU_MOD_X: 1843 switch (OFF) { 1844 case 0: 1845 AX = (u32) DST; 1846 DST = do_div(AX, (u32) SRC); 1847 break; 1848 case 1: 1849 AX = abs((s32)DST); 1850 AX = do_div(AX, abs((s32)SRC)); 1851 if ((s32)DST < 0) 1852 DST = (u32)-AX; 1853 else 1854 DST = (u32)AX; 1855 break; 1856 } 1857 CONT; 1858 ALU64_MOD_K: 1859 switch (OFF) { 1860 case 0: 1861 div64_u64_rem(DST, IMM, &AX); 1862 DST = AX; 1863 break; 1864 case 1: 1865 AX = div64_s64(DST, IMM); 1866 DST = DST - AX * IMM; 1867 break; 1868 } 1869 CONT; 1870 ALU_MOD_K: 1871 switch (OFF) { 1872 case 0: 1873 AX = (u32) DST; 1874 DST = do_div(AX, (u32) IMM); 1875 break; 1876 case 1: 1877 AX = abs((s32)DST); 1878 AX = do_div(AX, abs((s32)IMM)); 1879 if ((s32)DST < 0) 1880 DST = (u32)-AX; 1881 else 1882 DST = (u32)AX; 1883 break; 1884 } 1885 CONT; 1886 ALU64_DIV_X: 1887 switch (OFF) { 1888 case 0: 1889 DST = div64_u64(DST, SRC); 1890 break; 1891 case 1: 1892 DST = div64_s64(DST, SRC); 1893 break; 1894 } 1895 CONT; 1896 ALU_DIV_X: 1897 switch (OFF) { 1898 case 0: 1899 AX = (u32) DST; 1900 do_div(AX, (u32) SRC); 1901 DST = (u32) AX; 1902 break; 1903 case 1: 1904 AX = abs((s32)DST); 1905 do_div(AX, abs((s32)SRC)); 1906 if (((s32)DST < 0) == ((s32)SRC < 0)) 1907 DST = (u32)AX; 1908 else 1909 DST = (u32)-AX; 1910 break; 1911 } 1912 CONT; 1913 ALU64_DIV_K: 1914 switch (OFF) { 1915 case 0: 1916 DST = div64_u64(DST, IMM); 1917 break; 1918 case 1: 1919 DST = div64_s64(DST, IMM); 1920 break; 1921 } 1922 CONT; 1923 ALU_DIV_K: 1924 switch (OFF) { 1925 case 0: 1926 AX = (u32) DST; 1927 do_div(AX, (u32) IMM); 1928 DST = (u32) AX; 1929 break; 1930 case 1: 1931 AX = abs((s32)DST); 1932 do_div(AX, abs((s32)IMM)); 1933 if (((s32)DST < 0) == ((s32)IMM < 0)) 1934 DST = (u32)AX; 1935 else 1936 DST = (u32)-AX; 1937 break; 1938 } 1939 CONT; 1940 ALU_END_TO_BE: 1941 switch (IMM) { 1942 case 16: 1943 DST = (__force u16) cpu_to_be16(DST); 1944 break; 1945 case 32: 1946 DST = (__force u32) cpu_to_be32(DST); 1947 break; 1948 case 64: 1949 DST = (__force u64) cpu_to_be64(DST); 1950 break; 1951 } 1952 CONT; 1953 ALU_END_TO_LE: 1954 switch (IMM) { 1955 case 16: 1956 DST = (__force u16) cpu_to_le16(DST); 1957 break; 1958 case 32: 1959 DST = (__force u32) cpu_to_le32(DST); 1960 break; 1961 case 64: 1962 DST = (__force u64) cpu_to_le64(DST); 1963 break; 1964 } 1965 CONT; 1966 ALU64_END_TO_LE: 1967 switch (IMM) { 1968 case 16: 1969 DST = (__force u16) __swab16(DST); 1970 break; 1971 case 32: 1972 DST = (__force u32) __swab32(DST); 1973 break; 1974 case 64: 1975 DST = (__force u64) __swab64(DST); 1976 break; 1977 } 1978 CONT; 1979 1980 /* CALL */ 1981 JMP_CALL: 1982 /* Function call scratches BPF_R1-BPF_R5 registers, 1983 * preserves BPF_R6-BPF_R9, and stores return value 1984 * into BPF_R0. 
1985 */ 1986 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, 1987 BPF_R4, BPF_R5); 1988 CONT; 1989 1990 JMP_CALL_ARGS: 1991 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, 1992 BPF_R3, BPF_R4, 1993 BPF_R5, 1994 insn + insn->off + 1); 1995 CONT; 1996 1997 JMP_TAIL_CALL: { 1998 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; 1999 struct bpf_array *array = container_of(map, struct bpf_array, map); 2000 struct bpf_prog *prog; 2001 u32 index = BPF_R3; 2002 2003 if (unlikely(index >= array->map.max_entries)) 2004 goto out; 2005 2006 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT)) 2007 goto out; 2008 2009 tail_call_cnt++; 2010 2011 prog = READ_ONCE(array->ptrs[index]); 2012 if (!prog) 2013 goto out; 2014 2015 /* ARG1 at this point is guaranteed to point to CTX from 2016 * the verifier side due to the fact that the tail call is 2017 * handled like a helper, that is, bpf_tail_call_proto, 2018 * where arg1_type is ARG_PTR_TO_CTX. 2019 */ 2020 insn = prog->insnsi; 2021 goto select_insn; 2022 out: 2023 CONT; 2024 } 2025 JMP_JA: 2026 insn += insn->off; 2027 CONT; 2028 JMP32_JA: 2029 insn += insn->imm; 2030 CONT; 2031 JMP_EXIT: 2032 return BPF_R0; 2033 /* JMP */ 2034 #define COND_JMP(SIGN, OPCODE, CMP_OP) \ 2035 JMP_##OPCODE##_X: \ 2036 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \ 2037 insn += insn->off; \ 2038 CONT_JMP; \ 2039 } \ 2040 CONT; \ 2041 JMP32_##OPCODE##_X: \ 2042 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \ 2043 insn += insn->off; \ 2044 CONT_JMP; \ 2045 } \ 2046 CONT; \ 2047 JMP_##OPCODE##_K: \ 2048 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \ 2049 insn += insn->off; \ 2050 CONT_JMP; \ 2051 } \ 2052 CONT; \ 2053 JMP32_##OPCODE##_K: \ 2054 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \ 2055 insn += insn->off; \ 2056 CONT_JMP; \ 2057 } \ 2058 CONT; 2059 COND_JMP(u, JEQ, ==) 2060 COND_JMP(u, JNE, !=) 2061 COND_JMP(u, JGT, >) 2062 COND_JMP(u, JLT, <) 2063 COND_JMP(u, JGE, >=) 2064 COND_JMP(u, JLE, <=) 2065 COND_JMP(u, JSET, &) 2066 COND_JMP(s, JSGT, >) 2067 COND_JMP(s, JSLT, <) 2068 COND_JMP(s, JSGE, >=) 2069 COND_JMP(s, JSLE, <=) 2070 #undef COND_JMP 2071 /* ST, STX and LDX*/ 2072 ST_NOSPEC: 2073 /* Speculation barrier for mitigating Speculative Store Bypass. 2074 * In case of arm64, we rely on the firmware mitigation as 2075 * controlled via the ssbd kernel parameter. Whenever the 2076 * mitigation is enabled, it works for all of the kernel code 2077 * with no need to provide any additional instructions here. 2078 * In case of x86, we use 'lfence' insn for mitigation. We 2079 * reuse preexisting logic from Spectre v1 mitigation that 2080 * happens to produce the required code on x86 for v4 as well. 
2081 */ 2082 barrier_nospec(); 2083 CONT; 2084 #define LDST(SIZEOP, SIZE) \ 2085 STX_MEM_##SIZEOP: \ 2086 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ 2087 CONT; \ 2088 ST_MEM_##SIZEOP: \ 2089 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ 2090 CONT; \ 2091 LDX_MEM_##SIZEOP: \ 2092 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 2093 CONT; \ 2094 LDX_PROBE_MEM_##SIZEOP: \ 2095 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \ 2096 (const void *)(long) (SRC + insn->off)); \ 2097 DST = *((SIZE *)&DST); \ 2098 CONT; 2099 2100 LDST(B, u8) 2101 LDST(H, u16) 2102 LDST(W, u32) 2103 LDST(DW, u64) 2104 #undef LDST 2105 2106 #define LDSX(SIZEOP, SIZE) \ 2107 LDX_MEMSX_##SIZEOP: \ 2108 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 2109 CONT; \ 2110 LDX_PROBE_MEMSX_##SIZEOP: \ 2111 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \ 2112 (const void *)(long) (SRC + insn->off)); \ 2113 DST = *((SIZE *)&DST); \ 2114 CONT; 2115 2116 LDSX(B, s8) 2117 LDSX(H, s16) 2118 LDSX(W, s32) 2119 #undef LDSX 2120 2121 #define ATOMIC_ALU_OP(BOP, KOP) \ 2122 case BOP: \ 2123 if (BPF_SIZE(insn->code) == BPF_W) \ 2124 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \ 2125 (DST + insn->off)); \ 2126 else \ 2127 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \ 2128 (DST + insn->off)); \ 2129 break; \ 2130 case BOP | BPF_FETCH: \ 2131 if (BPF_SIZE(insn->code) == BPF_W) \ 2132 SRC = (u32) atomic_fetch_##KOP( \ 2133 (u32) SRC, \ 2134 (atomic_t *)(unsigned long) (DST + insn->off)); \ 2135 else \ 2136 SRC = (u64) atomic64_fetch_##KOP( \ 2137 (u64) SRC, \ 2138 (atomic64_t *)(unsigned long) (DST + insn->off)); \ 2139 break; 2140 2141 STX_ATOMIC_DW: 2142 STX_ATOMIC_W: 2143 switch (IMM) { 2144 ATOMIC_ALU_OP(BPF_ADD, add) 2145 ATOMIC_ALU_OP(BPF_AND, and) 2146 ATOMIC_ALU_OP(BPF_OR, or) 2147 ATOMIC_ALU_OP(BPF_XOR, xor) 2148 #undef ATOMIC_ALU_OP 2149 2150 case BPF_XCHG: 2151 if (BPF_SIZE(insn->code) == BPF_W) 2152 SRC = (u32) atomic_xchg( 2153 (atomic_t *)(unsigned long) (DST + insn->off), 2154 (u32) SRC); 2155 else 2156 SRC = (u64) atomic64_xchg( 2157 (atomic64_t *)(unsigned long) (DST + insn->off), 2158 (u64) SRC); 2159 break; 2160 case BPF_CMPXCHG: 2161 if (BPF_SIZE(insn->code) == BPF_W) 2162 BPF_R0 = (u32) atomic_cmpxchg( 2163 (atomic_t *)(unsigned long) (DST + insn->off), 2164 (u32) BPF_R0, (u32) SRC); 2165 else 2166 BPF_R0 = (u64) atomic64_cmpxchg( 2167 (atomic64_t *)(unsigned long) (DST + insn->off), 2168 (u64) BPF_R0, (u64) SRC); 2169 break; 2170 2171 default: 2172 goto default_label; 2173 } 2174 CONT; 2175 2176 default_label: 2177 /* If we ever reach this, we have a bug somewhere. Die hard here 2178 * instead of just returning 0; we could be somewhere in a subprog, 2179 * so execution could continue otherwise which we do /not/ want. 2180 * 2181 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable(). 
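* A verified program can therefore never reach this label; it only
* triggers if the interpreter's jump table and the verifier's opcode
* table fall out of sync.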
2182 */ 2183 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n", 2184 insn->code, insn->imm); 2185 BUG_ON(1); 2186 return 0; 2187 } 2188 2189 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size 2190 #define DEFINE_BPF_PROG_RUN(stack_size) \ 2191 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ 2192 { \ 2193 u64 stack[stack_size / sizeof(u64)]; \ 2194 u64 regs[MAX_BPF_EXT_REG] = {}; \ 2195 \ 2196 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2197 ARG1 = (u64) (unsigned long) ctx; \ 2198 return ___bpf_prog_run(regs, insn); \ 2199 } 2200 2201 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size 2202 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \ 2203 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ 2204 const struct bpf_insn *insn) \ 2205 { \ 2206 u64 stack[stack_size / sizeof(u64)]; \ 2207 u64 regs[MAX_BPF_EXT_REG]; \ 2208 \ 2209 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2210 BPF_R1 = r1; \ 2211 BPF_R2 = r2; \ 2212 BPF_R3 = r3; \ 2213 BPF_R4 = r4; \ 2214 BPF_R5 = r5; \ 2215 return ___bpf_prog_run(regs, insn); \ 2216 } 2217 2218 #define EVAL1(FN, X) FN(X) 2219 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y) 2220 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y) 2221 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y) 2222 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y) 2223 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y) 2224 2225 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192); 2226 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384); 2227 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512); 2228 2229 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192); 2230 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384); 2231 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512); 2232 2233 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size), 2234 2235 static unsigned int (*interpreters[])(const void *ctx, 2236 const struct bpf_insn *insn) = { 2237 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2238 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2239 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2240 }; 2241 #undef PROG_NAME_LIST 2242 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size), 2243 static __maybe_unused 2244 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, 2245 const struct bpf_insn *insn) = { 2246 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2247 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2248 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2249 }; 2250 #undef PROG_NAME_LIST 2251 2252 #ifdef CONFIG_BPF_SYSCALL 2253 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) 2254 { 2255 stack_depth = max_t(u32, stack_depth, 1); 2256 insn->off = (s16) insn->imm; 2257 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] - 2258 __bpf_call_base_args; 2259 insn->code = BPF_JMP | BPF_CALL_ARGS; 2260 } 2261 #endif 2262 #else 2263 static unsigned int __bpf_prog_ret0_warn(const void *ctx, 2264 const struct bpf_insn *insn) 2265 { 2266 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON 2267 * is not working properly, so warn about it! 
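* bpf_prog_select_runtime() fails programs that could not be JITed
* with -ENOTSUPP instead of falling back, so this stub is never
* expected to actually run.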
2268 */ 2269 WARN_ON_ONCE(1); 2270 return 0; 2271 } 2272 #endif 2273 2274 bool bpf_prog_map_compatible(struct bpf_map *map, 2275 const struct bpf_prog *fp) 2276 { 2277 enum bpf_prog_type prog_type = resolve_prog_type(fp); 2278 bool ret; 2279 2280 if (fp->kprobe_override) 2281 return false; 2282 2283 /* XDP programs inserted into maps are not guaranteed to run on 2284 * a particular netdev (and can run outside driver context entirely 2285 * in the case of devmap and cpumap). Until device checks 2286 * are implemented, prohibit adding dev-bound programs to program maps. 2287 */ 2288 if (bpf_prog_is_dev_bound(fp->aux)) 2289 return false; 2290 2291 spin_lock(&map->owner.lock); 2292 if (!map->owner.type) { 2293 /* There's no owner yet where we could check for 2294 * compatibility. 2295 */ 2296 map->owner.type = prog_type; 2297 map->owner.jited = fp->jited; 2298 map->owner.xdp_has_frags = fp->aux->xdp_has_frags; 2299 ret = true; 2300 } else { 2301 ret = map->owner.type == prog_type && 2302 map->owner.jited == fp->jited && 2303 map->owner.xdp_has_frags == fp->aux->xdp_has_frags; 2304 } 2305 spin_unlock(&map->owner.lock); 2306 2307 return ret; 2308 } 2309 2310 static int bpf_check_tail_call(const struct bpf_prog *fp) 2311 { 2312 struct bpf_prog_aux *aux = fp->aux; 2313 int i, ret = 0; 2314 2315 mutex_lock(&aux->used_maps_mutex); 2316 for (i = 0; i < aux->used_map_cnt; i++) { 2317 struct bpf_map *map = aux->used_maps[i]; 2318 2319 if (!map_type_contains_progs(map)) 2320 continue; 2321 2322 if (!bpf_prog_map_compatible(map, fp)) { 2323 ret = -EINVAL; 2324 goto out; 2325 } 2326 } 2327 2328 out: 2329 mutex_unlock(&aux->used_maps_mutex); 2330 return ret; 2331 } 2332 2333 static void bpf_prog_select_func(struct bpf_prog *fp) 2334 { 2335 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 2336 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 2337 2338 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; 2339 #else 2340 fp->bpf_func = __bpf_prog_ret0_warn; 2341 #endif 2342 } 2343 2344 /** 2345 * bpf_prog_select_runtime - select exec runtime for BPF program 2346 * @fp: bpf_prog populated with BPF program 2347 * @err: pointer to error variable 2348 * 2349 * Try to JIT eBPF program, if JIT is not available, use interpreter. 2350 * The BPF program will be executed via bpf_prog_run() function. 2351 * 2352 * Return: the &fp argument along with &err set to 0 for success or 2353 * a negative errno code on failure 2354 */ 2355 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 2356 { 2357 /* In case of BPF to BPF calls, verifier did all the prep 2358 * work with regards to JITing, etc. 2359 */ 2360 bool jit_needed = false; 2361 2362 if (fp->bpf_func) 2363 goto finalize; 2364 2365 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) || 2366 bpf_prog_has_kfunc_call(fp)) 2367 jit_needed = true; 2368 2369 bpf_prog_select_func(fp); 2370 2371 /* eBPF JITs can rewrite the program in case constant 2372 * blinding is active. However, in case of error during 2373 * blinding, bpf_int_jit_compile() must always return a 2374 * valid program, which in this case would simply not 2375 * be JITed, but falls back to the interpreter. 
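* (i.e. fp->jited stays zero and bpf_func keeps the entry selected
* by bpf_prog_select_func() above).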
2376 */
2377 if (!bpf_prog_is_offloaded(fp->aux)) {
2378 *err = bpf_prog_alloc_jited_linfo(fp);
2379 if (*err)
2380 return fp;
2381
2382 fp = bpf_int_jit_compile(fp);
2383 bpf_prog_jit_attempt_done(fp);
2384 if (!fp->jited && jit_needed) {
2385 *err = -ENOTSUPP;
2386 return fp;
2387 }
2388 } else {
2389 *err = bpf_prog_offload_compile(fp);
2390 if (*err)
2391 return fp;
2392 }
2393
2394 finalize:
2395 bpf_prog_lock_ro(fp);
2396
2397 /* The tail call compatibility check can only be done at
2398 * this late stage, as we need to determine whether we are
2399 * dealing with JITed or non-JITed program concatenations,
2400 * and not all eBPF JITs might immediately support all features.
2401 */
2402 *err = bpf_check_tail_call(fp);
2403
2404 return fp;
2405 }
2406 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2407
2408 static unsigned int __bpf_prog_ret1(const void *ctx,
2409 const struct bpf_insn *insn)
2410 {
2411 return 1;
2412 }
2413
2414 static struct bpf_prog_dummy {
2415 struct bpf_prog prog;
2416 } dummy_bpf_prog = {
2417 .prog = {
2418 .bpf_func = __bpf_prog_ret1,
2419 },
2420 };
2421
2422 struct bpf_empty_prog_array bpf_empty_prog_array = {
2423 .null_prog = NULL,
2424 };
2425 EXPORT_SYMBOL(bpf_empty_prog_array);
2426
2427 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2428 {
2429 if (prog_cnt)
2430 return kzalloc(sizeof(struct bpf_prog_array) +
2431 sizeof(struct bpf_prog_array_item) *
2432 (prog_cnt + 1),
2433 flags);
2434
2435 return &bpf_empty_prog_array.hdr;
2436 }
2437
2438 void bpf_prog_array_free(struct bpf_prog_array *progs)
2439 {
2440 if (!progs || progs == &bpf_empty_prog_array.hdr)
2441 return;
2442 kfree_rcu(progs, rcu);
2443 }
2444
2445 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2446 {
2447 struct bpf_prog_array *progs;
2448
2449 /* If RCU Tasks Trace grace period implies RCU grace period, there is
2450 * no need to call kfree_rcu(), just call kfree() directly.
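* Sleepable programs are protected by RCU Tasks Trace while the array
* pointer is still dereferenced under classic RCU, so both grace
* periods must have elapsed before the memory can be reused.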
2451 */ 2452 progs = container_of(rcu, struct bpf_prog_array, rcu); 2453 if (rcu_trace_implies_rcu_gp()) 2454 kfree(progs); 2455 else 2456 kfree_rcu(progs, rcu); 2457 } 2458 2459 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs) 2460 { 2461 if (!progs || progs == &bpf_empty_prog_array.hdr) 2462 return; 2463 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb); 2464 } 2465 2466 int bpf_prog_array_length(struct bpf_prog_array *array) 2467 { 2468 struct bpf_prog_array_item *item; 2469 u32 cnt = 0; 2470 2471 for (item = array->items; item->prog; item++) 2472 if (item->prog != &dummy_bpf_prog.prog) 2473 cnt++; 2474 return cnt; 2475 } 2476 2477 bool bpf_prog_array_is_empty(struct bpf_prog_array *array) 2478 { 2479 struct bpf_prog_array_item *item; 2480 2481 for (item = array->items; item->prog; item++) 2482 if (item->prog != &dummy_bpf_prog.prog) 2483 return false; 2484 return true; 2485 } 2486 2487 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, 2488 u32 *prog_ids, 2489 u32 request_cnt) 2490 { 2491 struct bpf_prog_array_item *item; 2492 int i = 0; 2493 2494 for (item = array->items; item->prog; item++) { 2495 if (item->prog == &dummy_bpf_prog.prog) 2496 continue; 2497 prog_ids[i] = item->prog->aux->id; 2498 if (++i == request_cnt) { 2499 item++; 2500 break; 2501 } 2502 } 2503 2504 return !!(item->prog); 2505 } 2506 2507 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, 2508 __u32 __user *prog_ids, u32 cnt) 2509 { 2510 unsigned long err = 0; 2511 bool nospc; 2512 u32 *ids; 2513 2514 /* users of this function are doing: 2515 * cnt = bpf_prog_array_length(); 2516 * if (cnt > 0) 2517 * bpf_prog_array_copy_to_user(..., cnt); 2518 * so below kcalloc doesn't need extra cnt > 0 check. 2519 */ 2520 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); 2521 if (!ids) 2522 return -ENOMEM; 2523 nospc = bpf_prog_array_copy_core(array, ids, cnt); 2524 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); 2525 kfree(ids); 2526 if (err) 2527 return -EFAULT; 2528 if (nospc) 2529 return -ENOSPC; 2530 return 0; 2531 } 2532 2533 void bpf_prog_array_delete_safe(struct bpf_prog_array *array, 2534 struct bpf_prog *old_prog) 2535 { 2536 struct bpf_prog_array_item *item; 2537 2538 for (item = array->items; item->prog; item++) 2539 if (item->prog == old_prog) { 2540 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); 2541 break; 2542 } 2543 } 2544 2545 /** 2546 * bpf_prog_array_delete_safe_at() - Replaces the program at the given 2547 * index into the program array with 2548 * a dummy no-op program. 2549 * @array: a bpf_prog_array 2550 * @index: the index of the program to replace 2551 * 2552 * Skips over dummy programs, by not counting them, when calculating 2553 * the position of the program to replace. 2554 * 2555 * Return: 2556 * * 0 - Success 2557 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2558 * * -ENOENT - Index out of range 2559 */ 2560 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index) 2561 { 2562 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog); 2563 } 2564 2565 /** 2566 * bpf_prog_array_update_at() - Updates the program at the given index 2567 * into the program array. 2568 * @array: a bpf_prog_array 2569 * @index: the index of the program to update 2570 * @prog: the program to insert into the array 2571 * 2572 * Skips over dummy programs, by not counting them, when calculating 2573 * the position of the program to update. 
2574 * 2575 * Return: 2576 * * 0 - Success 2577 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2578 * * -ENOENT - Index out of range 2579 */ 2580 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, 2581 struct bpf_prog *prog) 2582 { 2583 struct bpf_prog_array_item *item; 2584 2585 if (unlikely(index < 0)) 2586 return -EINVAL; 2587 2588 for (item = array->items; item->prog; item++) { 2589 if (item->prog == &dummy_bpf_prog.prog) 2590 continue; 2591 if (!index) { 2592 WRITE_ONCE(item->prog, prog); 2593 return 0; 2594 } 2595 index--; 2596 } 2597 return -ENOENT; 2598 } 2599 2600 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 2601 struct bpf_prog *exclude_prog, 2602 struct bpf_prog *include_prog, 2603 u64 bpf_cookie, 2604 struct bpf_prog_array **new_array) 2605 { 2606 int new_prog_cnt, carry_prog_cnt = 0; 2607 struct bpf_prog_array_item *existing, *new; 2608 struct bpf_prog_array *array; 2609 bool found_exclude = false; 2610 2611 /* Figure out how many existing progs we need to carry over to 2612 * the new array. 2613 */ 2614 if (old_array) { 2615 existing = old_array->items; 2616 for (; existing->prog; existing++) { 2617 if (existing->prog == exclude_prog) { 2618 found_exclude = true; 2619 continue; 2620 } 2621 if (existing->prog != &dummy_bpf_prog.prog) 2622 carry_prog_cnt++; 2623 if (existing->prog == include_prog) 2624 return -EEXIST; 2625 } 2626 } 2627 2628 if (exclude_prog && !found_exclude) 2629 return -ENOENT; 2630 2631 /* How many progs (not NULL) will be in the new array? */ 2632 new_prog_cnt = carry_prog_cnt; 2633 if (include_prog) 2634 new_prog_cnt += 1; 2635 2636 /* Do we have any prog (not NULL) in the new array? */ 2637 if (!new_prog_cnt) { 2638 *new_array = NULL; 2639 return 0; 2640 } 2641 2642 /* +1 as the end of prog_array is marked with NULL */ 2643 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL); 2644 if (!array) 2645 return -ENOMEM; 2646 new = array->items; 2647 2648 /* Fill in the new prog array */ 2649 if (carry_prog_cnt) { 2650 existing = old_array->items; 2651 for (; existing->prog; existing++) { 2652 if (existing->prog == exclude_prog || 2653 existing->prog == &dummy_bpf_prog.prog) 2654 continue; 2655 2656 new->prog = existing->prog; 2657 new->bpf_cookie = existing->bpf_cookie; 2658 new++; 2659 } 2660 } 2661 if (include_prog) { 2662 new->prog = include_prog; 2663 new->bpf_cookie = bpf_cookie; 2664 new++; 2665 } 2666 new->prog = NULL; 2667 *new_array = array; 2668 return 0; 2669 } 2670 2671 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 2672 u32 *prog_ids, u32 request_cnt, 2673 u32 *prog_cnt) 2674 { 2675 u32 cnt = 0; 2676 2677 if (array) 2678 cnt = bpf_prog_array_length(array); 2679 2680 *prog_cnt = cnt; 2681 2682 /* return early if user requested only program count or nothing to copy */ 2683 if (!request_cnt || !cnt) 2684 return 0; 2685 2686 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */ 2687 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? 
-ENOSPC
2688 : 0;
2689 }
2690
2691 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2692 struct bpf_map **used_maps, u32 len)
2693 {
2694 struct bpf_map *map;
2695 bool sleepable;
2696 u32 i;
2697
2698 sleepable = aux->sleepable;
2699 for (i = 0; i < len; i++) {
2700 map = used_maps[i];
2701 if (map->ops->map_poke_untrack)
2702 map->ops->map_poke_untrack(map, aux);
2703 if (sleepable)
2704 atomic64_dec(&map->sleepable_refcnt);
2705 bpf_map_put(map);
2706 }
2707 }
2708
2709 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2710 {
2711 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2712 kfree(aux->used_maps);
2713 }
2714
2715 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2716 struct btf_mod_pair *used_btfs, u32 len)
2717 {
2718 #ifdef CONFIG_BPF_SYSCALL
2719 struct btf_mod_pair *btf_mod;
2720 u32 i;
2721
2722 for (i = 0; i < len; i++) {
2723 btf_mod = &used_btfs[i];
2724 if (btf_mod->module)
2725 module_put(btf_mod->module);
2726 btf_put(btf_mod->btf);
2727 }
2728 #endif
2729 }
2730
2731 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2732 {
2733 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2734 kfree(aux->used_btfs);
2735 }
2736
2737 static void bpf_prog_free_deferred(struct work_struct *work)
2738 {
2739 struct bpf_prog_aux *aux;
2740 int i;
2741
2742 aux = container_of(work, struct bpf_prog_aux, work);
2743 #ifdef CONFIG_BPF_SYSCALL
2744 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2745 #endif
2746 #ifdef CONFIG_CGROUP_BPF
2747 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2748 bpf_cgroup_atype_put(aux->cgroup_atype);
2749 #endif
2750 bpf_free_used_maps(aux);
2751 bpf_free_used_btfs(aux);
2752 if (bpf_prog_is_dev_bound(aux))
2753 bpf_prog_dev_bound_destroy(aux->prog);
2754 #ifdef CONFIG_PERF_EVENTS
2755 if (aux->prog->has_callchain_buf)
2756 put_callchain_buffers();
2757 #endif
2758 if (aux->dst_trampoline)
2759 bpf_trampoline_put(aux->dst_trampoline);
2760 for (i = 0; i < aux->real_func_cnt; i++) {
2761 /* We can just unlink the subprog poke descriptor table as
2762 * it was originally linked to the main program and is also
2763 * released along with it.
2764 */
2765 aux->func[i]->aux->poke_tab = NULL;
2766 bpf_jit_free(aux->func[i]);
2767 }
2768 if (aux->real_func_cnt) {
2769 kfree(aux->func);
2770 bpf_prog_unlock_free(aux->prog);
2771 } else {
2772 bpf_jit_free(aux->prog);
2773 }
2774 }
2775
2776 void bpf_prog_free(struct bpf_prog *fp)
2777 {
2778 struct bpf_prog_aux *aux = fp->aux;
2779
2780 if (aux->dst_prog)
2781 bpf_prog_put(aux->dst_prog);
2782 bpf_token_put(aux->token);
2783 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2784 schedule_work(&aux->work);
2785 }
2786 EXPORT_SYMBOL_GPL(bpf_prog_free);
2787
2788 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2789 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2790
2791 void bpf_user_rnd_init_once(void)
2792 {
2793 prandom_init_once(&bpf_user_rnd_state);
2794 }
2795
2796 BPF_CALL_0(bpf_user_rnd_u32)
2797 {
2798 /* Should someone ever have the rather unwise idea to use some
2799 * of the registers passed into this function, then note that
2800 * this function is called from native eBPF and classic-to-eBPF
2801 * transformations. Register assignments from both sides are
2802 * different, e.g. classic always sets fn(ctx, A, X) here.
2803 */ 2804 struct rnd_state *state; 2805 u32 res; 2806 2807 state = &get_cpu_var(bpf_user_rnd_state); 2808 res = prandom_u32_state(state); 2809 put_cpu_var(bpf_user_rnd_state); 2810 2811 return res; 2812 } 2813 2814 BPF_CALL_0(bpf_get_raw_cpu_id) 2815 { 2816 return raw_smp_processor_id(); 2817 } 2818 2819 /* Weak definitions of helper functions in case we don't have bpf syscall. */ 2820 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; 2821 const struct bpf_func_proto bpf_map_update_elem_proto __weak; 2822 const struct bpf_func_proto bpf_map_delete_elem_proto __weak; 2823 const struct bpf_func_proto bpf_map_push_elem_proto __weak; 2824 const struct bpf_func_proto bpf_map_pop_elem_proto __weak; 2825 const struct bpf_func_proto bpf_map_peek_elem_proto __weak; 2826 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak; 2827 const struct bpf_func_proto bpf_spin_lock_proto __weak; 2828 const struct bpf_func_proto bpf_spin_unlock_proto __weak; 2829 const struct bpf_func_proto bpf_jiffies64_proto __weak; 2830 2831 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; 2832 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; 2833 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak; 2834 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; 2835 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak; 2836 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak; 2837 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak; 2838 2839 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak; 2840 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak; 2841 const struct bpf_func_proto bpf_get_current_comm_proto __weak; 2842 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak; 2843 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak; 2844 const struct bpf_func_proto bpf_get_local_storage_proto __weak; 2845 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak; 2846 const struct bpf_func_proto bpf_snprintf_btf_proto __weak; 2847 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak; 2848 const struct bpf_func_proto bpf_set_retval_proto __weak; 2849 const struct bpf_func_proto bpf_get_retval_proto __weak; 2850 2851 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) 2852 { 2853 return NULL; 2854 } 2855 2856 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void) 2857 { 2858 return NULL; 2859 } 2860 2861 u64 __weak 2862 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 2863 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 2864 { 2865 return -ENOTSUPP; 2866 } 2867 EXPORT_SYMBOL_GPL(bpf_event_output); 2868 2869 /* Always built-in helper functions. */ 2870 const struct bpf_func_proto bpf_tail_call_proto = { 2871 .func = NULL, 2872 .gpl_only = false, 2873 .ret_type = RET_VOID, 2874 .arg1_type = ARG_PTR_TO_CTX, 2875 .arg2_type = ARG_CONST_MAP_PTR, 2876 .arg3_type = ARG_ANYTHING, 2877 }; 2878 2879 /* Stub for JITs that only support cBPF. eBPF programs are interpreted. 2880 * It is encouraged to implement bpf_int_jit_compile() instead, so that 2881 * eBPF and implicitly also cBPF can get JITed! 2882 */ 2883 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog) 2884 { 2885 return prog; 2886 } 2887 2888 /* Stub for JITs that support eBPF. All cBPF code gets transformed into 2889 * eBPF by the kernel and is later compiled by bpf_int_jit_compile(). 
2890 */ 2891 void __weak bpf_jit_compile(struct bpf_prog *prog) 2892 { 2893 } 2894 2895 bool __weak bpf_helper_changes_pkt_data(void *func) 2896 { 2897 return false; 2898 } 2899 2900 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage 2901 * analysis code and wants explicit zero extension inserted by verifier. 2902 * Otherwise, return FALSE. 2903 * 2904 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if 2905 * you don't override this. JITs that don't want these extra insns can detect 2906 * them using insn_is_zext. 2907 */ 2908 bool __weak bpf_jit_needs_zext(void) 2909 { 2910 return false; 2911 } 2912 2913 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */ 2914 bool __weak bpf_jit_supports_subprog_tailcalls(void) 2915 { 2916 return false; 2917 } 2918 2919 bool __weak bpf_jit_supports_kfunc_call(void) 2920 { 2921 return false; 2922 } 2923 2924 bool __weak bpf_jit_supports_far_kfunc_call(void) 2925 { 2926 return false; 2927 } 2928 2929 /* Return TRUE if the JIT backend satisfies the following two conditions: 2930 * 1) JIT backend supports atomic_xchg() on pointer-sized words. 2931 * 2) Under the specific arch, the implementation of xchg() is the same 2932 * as atomic_xchg() on pointer-sized words. 2933 */ 2934 bool __weak bpf_jit_supports_ptr_xchg(void) 2935 { 2936 return false; 2937 } 2938 2939 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call 2940 * skb_copy_bits(), so provide a weak definition of it for NET-less config. 2941 */ 2942 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, 2943 int len) 2944 { 2945 return -EFAULT; 2946 } 2947 2948 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 2949 void *addr1, void *addr2) 2950 { 2951 return -ENOTSUPP; 2952 } 2953 2954 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len) 2955 { 2956 return ERR_PTR(-ENOTSUPP); 2957 } 2958 2959 int __weak bpf_arch_text_invalidate(void *dst, size_t len) 2960 { 2961 return -ENOTSUPP; 2962 } 2963 2964 bool __weak bpf_jit_supports_exceptions(void) 2965 { 2966 return false; 2967 } 2968 2969 void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) 2970 { 2971 } 2972 2973 #ifdef CONFIG_BPF_SYSCALL 2974 static int __init bpf_global_ma_init(void) 2975 { 2976 int ret; 2977 2978 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false); 2979 bpf_global_ma_set = !ret; 2980 return ret; 2981 } 2982 late_initcall(bpf_global_ma_init); 2983 #endif 2984 2985 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key); 2986 EXPORT_SYMBOL(bpf_stats_enabled_key); 2987 2988 /* All definitions of tracepoints related to BPF. */ 2989 #define CREATE_TRACE_POINTS 2990 #include <linux/bpf_trace.h> 2991 2992 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception); 2993 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx); 2994
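/* Illustrative sketch, not part of this file: an architecture JIT opts in
 * to the capabilities above by shipping non-weak definitions from its own
 * bpf_jit_comp.c, which override the __weak stubs at link time. For example,
 * a backend that wants explicit zero extensions from the verifier and that
 * supports kfunc calls would provide:
 *
 *	bool bpf_jit_needs_zext(void)
 *	{
 *		return true;
 *	}
 *
 *	bool bpf_jit_supports_kfunc_call(void)
 *	{
 *		return true;
 *	}
 */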