// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <net/xdp.h>
#include "disasm.h"

#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)

static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       insn->imm == BPF_CMPXCHG;
}

/* Return the regno defined by the insn, or -1. */
static int insn_def_regno(const struct bpf_insn *insn)
{
	switch (BPF_CLASS(insn->code)) {
	case BPF_JMP:
	case BPF_JMP32:
	case BPF_ST:
		return -1;
	case BPF_STX:
		if (BPF_MODE(insn->code) == BPF_ATOMIC ||
		    BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) {
			if (insn->imm == BPF_CMPXCHG)
				return BPF_REG_0;
			else if (insn->imm == BPF_LOAD_ACQ)
				return insn->dst_reg;
			else if (insn->imm & BPF_FETCH)
				return insn->src_reg;
		}
		return -1;
	default:
		return insn->dst_reg;
	}
}

/* Return TRUE if INSN has defined any 32-bit value explicitly. */
static bool insn_has_def32(struct bpf_insn *insn)
{
	int dst_reg = insn_def_regno(insn);

	if (dst_reg == -1)
		return false;

	return !bpf_is_reg64(insn, dst_reg, NULL, DST_OP);
}

static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b)
{
	const struct bpf_kfunc_desc *d0 = a;
	const struct bpf_kfunc_desc *d1 = b;

	if (d0->imm != d1->imm)
		return d0->imm < d1->imm ? -1 : 1;
	if (d0->offset != d1->offset)
		return d0->offset < d1->offset ? -1 : 1;
	return 0;
}

const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{
	const struct bpf_kfunc_desc desc = {
		.imm = insn->imm,
		.offset = insn->off,
	};
	const struct bpf_kfunc_desc *res;
	struct bpf_kfunc_desc_tab *tab;

	tab = prog->aux->kfunc_tab;
	res = bsearch(&desc, tab->descs, tab->nr_descs,
		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off);

	return res ? &res->func_model : NULL;
}

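/* Compute the imm encoding for a kfunc call descriptor: when the JIT
 * supports far kfunc calls the BTF func_id is used directly, otherwise
 * imm holds the offset of the kernel function from __bpf_call_base and
 * must fit in a signed 32-bit value.
 */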
static int set_kfunc_desc_imm(struct bpf_verifier_env *env, struct bpf_kfunc_desc *desc)
{
	unsigned long call_imm;

	if (bpf_jit_supports_far_kfunc_call()) {
		call_imm = desc->func_id;
	} else {
		call_imm = BPF_CALL_IMM(desc->addr);
		/* Check whether the relative offset overflows desc->imm */
		if ((unsigned long)(s32)call_imm != call_imm) {
			verbose(env, "address of kernel func_id %u is out of range\n",
				desc->func_id);
			return -EINVAL;
		}
	}
	desc->imm = call_imm;
	return 0;
}

static int sort_kfunc_descs_by_imm_off(struct bpf_verifier_env *env)
{
	struct bpf_kfunc_desc_tab *tab;
	int i, err;

	tab = env->prog->aux->kfunc_tab;
	if (!tab)
		return 0;

	for (i = 0; i < tab->nr_descs; i++) {
		err = set_kfunc_desc_imm(env, &tab->descs[i]);
		if (err)
			return err;
	}

	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
	     kfunc_desc_cmp_by_imm_off, NULL);
	return 0;
}

static int add_kfunc_in_insns(struct bpf_verifier_env *env,
			      struct bpf_insn *insn, int cnt)
{
	int i, ret;

	for (i = 0; i < cnt; i++, insn++) {
		if (bpf_pseudo_kfunc_call(insn)) {
			ret = bpf_add_kfunc_call(env, insn->imm, insn->off);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
static int get_callee_stack_depth(struct bpf_verifier_env *env,
				  const struct bpf_insn *insn, int idx)
{
	int start = idx + insn->imm + 1, subprog;

	subprog = bpf_find_subprog(env, start);
	if (verifier_bug_if(subprog < 0, env, "get stack depth: no program at insn %d", start))
		return -EFAULT;
	return env->subprog_info[subprog].stack_depth;
}
#endif

/* A single env->prog->insnsi[off] instruction was replaced with the range
 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
 * [0, off) and [off, end) to new locations, so the patched range stays zero.
 */
static void adjust_insn_aux_data(struct bpf_verifier_env *env,
				 struct bpf_prog *new_prog, u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *data = env->insn_aux_data;
	struct bpf_insn *insn = new_prog->insnsi;
	u32 old_seen = data[off].seen;
	u32 prog_len;
	int i;

	/* The aux info at OFF always needs adjustment, no matter whether the
	 * fast path (cnt == 1) is taken or not. There is no guarantee the INSN
	 * at OFF is the original insn of the old prog.
	 */
	data[off].zext_dst = insn_has_def32(insn + off + cnt - 1);

	if (cnt == 1)
		return;
	prog_len = new_prog->len;

	memmove(data + off + cnt - 1, data + off,
		sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
	memset(data + off, 0, sizeof(struct bpf_insn_aux_data) * (cnt - 1));
	for (i = off; i < off + cnt - 1; i++) {
		/* Expand insnsi[off]'s seen count to the patched range. */
		data[i].seen = old_seen;
		data[i].zext_dst = insn_has_def32(insn + i);
	}
}

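/* A patch replaces one insn at 'off' with 'len' new ones, so every
 * instruction index past 'off' shifts by (len - 1). The adjust_* helpers
 * below apply that shift to subprog starts, insn-array maps and JIT poke
 * descriptors.
 */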
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{
	int i;

	if (len == 1)
		return;
	/* NOTE: fake 'exit' subprog should be updated as well. */
	for (i = 0; i <= env->subprog_cnt; i++) {
		if (env->subprog_info[i].start <= off)
			continue;
		env->subprog_info[i].start += len - 1;
	}
}

static void adjust_insn_arrays(struct bpf_verifier_env *env, u32 off, u32 len)
{
	int i;

	if (len == 1)
		return;

	for (i = 0; i < env->insn_array_map_cnt; i++)
		bpf_insn_array_adjust(env->insn_array_maps[i], off, len);
}

static void adjust_insn_arrays_after_remove(struct bpf_verifier_env *env, u32 off, u32 len)
{
	int i;

	for (i = 0; i < env->insn_array_map_cnt; i++)
		bpf_insn_array_adjust_after_remove(env->insn_array_maps[i], off, len);
}

static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	int i, sz = prog->aux->size_poke_tab;
	struct bpf_jit_poke_descriptor *desc;

	for (i = 0; i < sz; i++) {
		desc = &tab[i];
		if (desc->insn_idx <= off)
			continue;
		desc->insn_idx += len - 1;
	}
}

static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
					    const struct bpf_insn *patch, u32 len)
{
	struct bpf_prog *new_prog;
	struct bpf_insn_aux_data *new_data = NULL;

	if (len > 1) {
		new_data = vrealloc(env->insn_aux_data,
				    array_size(env->prog->len + len - 1,
					       sizeof(struct bpf_insn_aux_data)),
				    GFP_KERNEL_ACCOUNT | __GFP_ZERO);
		if (!new_data)
			return NULL;

		env->insn_aux_data = new_data;
	}

	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
	if (IS_ERR(new_prog)) {
		if (PTR_ERR(new_prog) == -ERANGE)
			verbose(env,
				"insn %d cannot be patched due to 16-bit range\n",
				env->insn_aux_data[off].orig_idx);
		return NULL;
	}
	adjust_insn_aux_data(env, new_prog, off, len);
	adjust_subprog_starts(env, off, len);
	adjust_insn_arrays(env, off, len);
	adjust_poke_descs(new_prog, off, len);
	return new_prog;
}

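/* A typical caller in this file builds the replacement insns in
 * env->insn_buf and then, roughly:
 *
 *	new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 *	if (!new_prog)
 *		return -ENOMEM;
 *	delta += cnt - 1;
 *	env->prog = new_prog;
 *	insn = new_prog->insnsi + i + delta;
 *
 * where 'delta' accumulates how far index 'i' of the original program has
 * shifted because of earlier patches in the same pass.
 */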
/*
 * For all jmp insns in a given 'prog' that point to 'tgt_idx' insn adjust the
 * jump offset by 'delta'.
 */
static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 insn_cnt = prog->len, i;
	s32 imm;
	s16 off;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code = insn->code;

		if (tgt_idx <= i && i < tgt_idx + delta)
			continue;

		if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT)
			continue;

		if (insn->code == (BPF_JMP32 | BPF_JA)) {
			if (i + 1 + insn->imm != tgt_idx)
				continue;
			if (check_add_overflow(insn->imm, delta, &imm))
				return -ERANGE;
			insn->imm = imm;
		} else {
			if (i + 1 + insn->off != tgt_idx)
				continue;
			if (check_add_overflow(insn->off, delta, &off))
				return -ERANGE;
			insn->off = off;
		}
	}
	return 0;
}

static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
					      u32 off, u32 cnt)
{
	int i, j;

	/* find first prog starting at or after off (first to remove) */
	for (i = 0; i < env->subprog_cnt; i++)
		if (env->subprog_info[i].start >= off)
			break;
	/* find first prog starting at or after off + cnt (first to stay) */
	for (j = i; j < env->subprog_cnt; j++)
		if (env->subprog_info[j].start >= off + cnt)
			break;
	/* if j doesn't start exactly at off + cnt, we are just removing
	 * the front of previous prog
	 */
	if (env->subprog_info[j].start != off + cnt)
		j--;

	if (j > i) {
		struct bpf_prog_aux *aux = env->prog->aux;
		int move;

		/* move fake 'exit' subprog as well */
		move = env->subprog_cnt + 1 - j;

		memmove(env->subprog_info + i,
			env->subprog_info + j,
			sizeof(*env->subprog_info) * move);
		env->subprog_cnt -= j - i;

		/* remove func_info */
		if (aux->func_info) {
			move = aux->func_info_cnt - j;

			memmove(aux->func_info + i,
				aux->func_info + j,
				sizeof(*aux->func_info) * move);
			aux->func_info_cnt -= j - i;
			/* func_info->insn_off is set after all code rewrites,
			 * in adjust_btf_func() - no need to adjust
			 */
		}
	} else {
		/* convert i from "first prog to remove" to "first to adjust" */
		if (env->subprog_info[i].start == off)
			i++;
	}

	/* update fake 'exit' subprog as well */
	for (; i <= env->subprog_cnt; i++)
		env->subprog_info[i].start -= cnt;

	return 0;
}

static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
				      u32 cnt)
{
	struct bpf_prog *prog = env->prog;
	u32 i, l_off, l_cnt, nr_linfo;
	struct bpf_line_info *linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo)
		return 0;

	linfo = prog->aux->linfo;

	/* find first line info to remove, count lines to be removed */
	for (i = 0; i < nr_linfo; i++)
		if (linfo[i].insn_off >= off)
			break;

	l_off = i;
	l_cnt = 0;
	for (; i < nr_linfo; i++)
		if (linfo[i].insn_off < off + cnt)
			l_cnt++;
		else
			break;

	/* First live insn doesn't match first live linfo, it needs to "inherit"
	 * last removed linfo. prog is already modified, so prog->len == off
	 * means no live instructions after (tail of the program was removed).
	 */
	if (prog->len != off && l_cnt &&
	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
		l_cnt--;
		linfo[--i].insn_off = off + cnt;
	}

	/* remove the line info which refer to the removed instructions */
	if (l_cnt) {
		memmove(linfo + l_off, linfo + i,
			sizeof(*linfo) * (nr_linfo - i));

		prog->aux->nr_linfo -= l_cnt;
		nr_linfo = prog->aux->nr_linfo;
	}

	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
	for (i = l_off; i < nr_linfo; i++)
		linfo[i].insn_off -= cnt;

	/* fix up all subprogs (incl. 'exit') which start >= off */
	for (i = 0; i <= env->subprog_cnt; i++)
		if (env->subprog_info[i].linfo_idx > l_off) {
			/* program may have started in the removed region but
			 * may not be fully removed
			 */
			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
				env->subprog_info[i].linfo_idx -= l_cnt;
			else
				env->subprog_info[i].linfo_idx = l_off;
		}

	return 0;
}

/*
 * Clean up dynamically allocated fields of aux data for instructions [start, ...]
 */
void bpf_clear_insn_aux_data(struct bpf_verifier_env *env, int start, int len)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn *insns = env->prog->insnsi;
	int end = start + len;
	int i;

	for (i = start; i < end; i++) {
		if (aux_data[i].jt) {
			kvfree(aux_data[i].jt);
			aux_data[i].jt = NULL;
		}

		if (bpf_is_ldimm64(&insns[i]))
			i++;
	}
}

static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	unsigned int orig_prog_len = env->prog->len;
	int err;

	if (bpf_prog_is_offloaded(env->prog->aux))
		bpf_prog_offload_remove_insns(env, off, cnt);

	/* Should be called before bpf_remove_insns, as it uses prog->insnsi */
	bpf_clear_insn_aux_data(env, off, cnt);

	err = bpf_remove_insns(env->prog, off, cnt);
	if (err)
		return err;

	err = adjust_subprog_starts_after_remove(env, off, cnt);
	if (err)
		return err;

	err = bpf_adj_linfo_after_remove(env, off, cnt);
	if (err)
		return err;

	adjust_insn_arrays_after_remove(env, off, cnt);

	memmove(aux_data + off, aux_data + off + cnt,
		sizeof(*aux_data) * (orig_prog_len - off - cnt));

	return 0;
}

static const struct bpf_insn NOP = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
static const struct bpf_insn MAY_GOTO_0 = BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0, 0);

bool bpf_insn_is_cond_jump(u8 code)
{
	u8 op;

	op = BPF_OP(code);
	if (BPF_CLASS(code) == BPF_JMP32)
		return op != BPF_JA;

	if (BPF_CLASS(code) != BPF_JMP)
		return false;

	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}

void bpf_opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
	struct bpf_insn *insn = env->prog->insnsi;
	const int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_insn_is_cond_jump(insn->code))
			continue;

		if (!aux_data[i + 1].seen)
			ja.off = insn->off;
		else if (!aux_data[i + 1 + insn->off].seen)
			ja.off = 0;
		else
			continue;

		if (bpf_prog_is_offloaded(env->prog->aux))
			bpf_prog_offload_replace_insn(env, i, &ja);

		memcpy(insn, &ja, sizeof(ja));
	}
}

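/* Remove every maximal run of instructions that verification never marked
 * 'seen'; such code is unreachable and can be dropped entirely.
 */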
int bpf_opt_remove_dead_code(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	int insn_cnt = env->prog->len;
	int i, err;

	for (i = 0; i < insn_cnt; i++) {
		int j;

		j = 0;
		while (i + j < insn_cnt && !aux_data[i + j].seen)
			j++;
		if (!j)
			continue;

		err = verifier_remove_insns(env, i, j);
		if (err)
			return err;
		insn_cnt = env->prog->len;
	}

	return 0;
}

int bpf_opt_remove_nops(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	bool is_may_goto_0, is_ja;
	int i, err;

	for (i = 0; i < insn_cnt; i++) {
		is_may_goto_0 = !memcmp(&insn[i], &MAY_GOTO_0, sizeof(MAY_GOTO_0));
		is_ja = !memcmp(&insn[i], &NOP, sizeof(NOP));

		if (!is_may_goto_0 && !is_ja)
			continue;

		err = verifier_remove_insns(env, i, 1);
		if (err)
			return err;
		insn_cnt--;
		/* Go back one insn to catch may_goto +1; may_goto +0 sequence */
		i -= (is_may_goto_0 && i > 0) ? 2 : 1;
	}

	return 0;
}

int bpf_opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
				      const union bpf_attr *attr)
{
	struct bpf_insn *patch;
	/* use env->insn_buf as two independent buffers */
	struct bpf_insn *zext_patch = env->insn_buf;
	struct bpf_insn *rnd_hi32_patch = &env->insn_buf[2];
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	int i, patch_len, delta = 0, len = env->prog->len;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_prog *new_prog;
	bool rnd_hi32;

	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
	zext_patch[1] = BPF_ZEXT_REG(0);
	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
	for (i = 0; i < len; i++) {
		int adj_idx = i + delta;
		struct bpf_insn insn;
		int load_reg;

		insn = insns[adj_idx];
		load_reg = insn_def_regno(&insn);
		if (!aux[adj_idx].zext_dst) {
			u8 code, class;
			u32 imm_rnd;

			if (!rnd_hi32)
				continue;

			code = insn.code;
			class = BPF_CLASS(code);
			if (load_reg == -1)
				continue;

			/* NOTE: arg "reg" (the fourth one) is only used for
			 * BPF_STX + SRC_OP, so it is safe to pass NULL
			 * here.
			 */
			if (bpf_is_reg64(&insn, load_reg, NULL, DST_OP)) {
				if (class == BPF_LD &&
				    BPF_MODE(code) == BPF_IMM)
					i++;
				continue;
			}

			/* ctx load could be transformed into wider load. */
			if (class == BPF_LDX &&
			    aux[adj_idx].ptr_type == PTR_TO_CTX)
				continue;

			imm_rnd = get_random_u32();
			rnd_hi32_patch[0] = insn;
			rnd_hi32_patch[1].imm = imm_rnd;
			rnd_hi32_patch[3].dst_reg = load_reg;
			patch = rnd_hi32_patch;
			patch_len = 4;
			goto apply_patch_buffer;
		}

		/* Add in a zero-extend instruction if a) the JIT has requested
		 * it or b) it's a CMPXCHG.
		 *
		 * The latter is because: BPF_CMPXCHG always loads a value into
		 * R0, therefore always zero-extends. However some archs'
		 * equivalent instruction only does this load when the
		 * comparison is successful. This detail of CMPXCHG is
		 * orthogonal to the general zero-extension behaviour of the
		 * CPU, so it's treated independently of bpf_jit_needs_zext.
		 */
		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
			continue;

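		/* Whatever survives the checks below gets BPF_ZEXT_REG(dst)
		 * appended right after it: a special 32-bit mov of dst onto
		 * itself that makes the JIT emit an explicit clearing of the
		 * upper 32 bits.
		 */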
		/* Zero-extension is done by the caller. */
		if (bpf_pseudo_kfunc_call(&insn))
			continue;

		if (verifier_bug_if(load_reg == -1, env,
				    "zext_dst is set, but no reg is defined"))
			return -EFAULT;

		zext_patch[0] = insn;
		zext_patch[1].dst_reg = load_reg;
		zext_patch[1].src_reg = load_reg;
		patch = zext_patch;
		patch_len = 2;
apply_patch_buffer:
		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
		if (!new_prog)
			return -ENOMEM;
		env->prog = new_prog;
		insns = new_prog->insnsi;
		aux = env->insn_aux_data;
		delta += patch_len - 1;
	}

	return 0;
}

/* convert load instructions that access fields of a context type into a
 * sequence of instructions that access fields of the underlying structure:
 * struct __sk_buff    -> struct sk_buff
 * struct bpf_sock_ops -> struct sock
 */
int bpf_convert_ctx_accesses(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *subprogs = env->subprog_info;
	const struct bpf_verifier_ops *ops = env->ops;
	int i, cnt, size, ctx_field_size, ret, delta = 0, epilogue_cnt = 0;
	const int insn_cnt = env->prog->len;
	struct bpf_insn *epilogue_buf = env->epilogue_buf;
	struct bpf_insn *insn_buf = env->insn_buf;
	struct bpf_insn *insn;
	u32 target_size, size_default, off;
	struct bpf_prog *new_prog;
	enum bpf_access_type type;
	bool is_narrower_load;
	int epilogue_idx = 0;

	if (ops->gen_epilogue) {
		epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog,
						 -(subprogs[0].stack_depth + 8));
		if (epilogue_cnt >= INSN_BUF_SIZE) {
			verifier_bug(env, "epilogue is too long");
			return -EFAULT;
		} else if (epilogue_cnt) {
			/* Save the ARG_PTR_TO_CTX for the epilogue to use */
			cnt = 0;
			subprogs[0].stack_depth += 8;
			insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1,
						      -subprogs[0].stack_depth);
			insn_buf[cnt++] = env->prog->insnsi[0];
			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;
			env->prog = new_prog;
			delta += cnt - 1;

			ret = add_kfunc_in_insns(env, epilogue_buf, epilogue_cnt - 1);
			if (ret < 0)
				return ret;
		}
	}

	if (ops->gen_prologue || env->seen_direct_write) {
		if (!ops->gen_prologue) {
			verifier_bug(env, "gen_prologue is null");
			return -EFAULT;
		}
		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
					env->prog);
		if (cnt >= INSN_BUF_SIZE) {
			verifier_bug(env, "prologue is too long");
			return -EFAULT;
		} else if (cnt) {
			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			env->prog = new_prog;
			delta += cnt - 1;

			ret = add_kfunc_in_insns(env, insn_buf, cnt - 1);
			if (ret < 0)
				return ret;
		}
	}

	if (delta)
		WARN_ON(adjust_jmp_off(env->prog, 0, delta));

	if (bpf_prog_is_offloaded(env->prog->aux))
		return 0;

	insn = env->prog->insnsi + delta;

	for (i = 0; i < insn_cnt; i++, insn++) {
		bpf_convert_ctx_access_t convert_ctx_access;
		u8 mode;

		if (env->insn_aux_data[i + delta].nospec) {
			WARN_ON_ONCE(env->insn_aux_data[i + delta].alu_state);
			struct bpf_insn *patch = insn_buf;

			*patch++ = BPF_ST_NOSPEC();
			*patch++ = *insn;
			cnt = patch - insn_buf;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			/* This can not be easily merged with the
			 * nospec_result-case, because an insn may require a
			 * nospec before and after itself. Therefore also do not
			 * 'continue' here but potentially apply further
			 * patching to insn. *insn should equal patch[1] now.
			 */
		}

		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW) ||
		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) ||
		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) ||
		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) {
			type = BPF_READ;
		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
			type = BPF_WRITE;
		} else if ((insn->code == (BPF_STX | BPF_ATOMIC | BPF_B) ||
			    insn->code == (BPF_STX | BPF_ATOMIC | BPF_H) ||
			    insn->code == (BPF_STX | BPF_ATOMIC | BPF_W) ||
			    insn->code == (BPF_STX | BPF_ATOMIC | BPF_DW)) &&
			   env->insn_aux_data[i + delta].ptr_type == PTR_TO_ARENA) {
			insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code);
			env->prog->aux->num_exentries++;
			continue;
		} else if (insn->code == (BPF_JMP | BPF_EXIT) &&
			   epilogue_cnt &&
			   i + delta < subprogs[1].start) {
			/* Generate epilogue for the main prog */
			if (epilogue_idx) {
				/* jump back to the earlier generated epilogue */
				insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1);
				cnt = 1;
			} else {
				memcpy(insn_buf, epilogue_buf,
				       epilogue_cnt * sizeof(*epilogue_buf));
				cnt = epilogue_cnt;
				/* epilogue_idx cannot be 0. It must have at
				 * least one ctx ptr saving insn before the
				 * epilogue.
				 */
				epilogue_idx = i + delta;
			}
			goto patch_insn_buf;
		} else {
			continue;
		}

		if (type == BPF_WRITE &&
		    env->insn_aux_data[i + delta].nospec_result) {
			/* nospec_result is only used to mitigate Spectre v4 and
			 * to limit verification-time for Spectre v1.
			 */
			struct bpf_insn *patch = insn_buf;

			*patch++ = *insn;
			*patch++ = BPF_ST_NOSPEC();
			cnt = patch - insn_buf;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
		case PTR_TO_CTX:
			if (!ops->convert_ctx_access)
				continue;
			convert_ctx_access = ops->convert_ctx_access;
			break;
		case PTR_TO_SOCKET:
		case PTR_TO_SOCK_COMMON:
			convert_ctx_access = bpf_sock_convert_ctx_access;
			break;
		case PTR_TO_TCP_SOCK:
			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
			break;
		case PTR_TO_XDP_SOCK:
			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
			break;
		case PTR_TO_BTF_ID:
		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
			/* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike
			 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot
			 * be said once it is marked PTR_UNTRUSTED, hence we must handle
			 * any faults for loads into such types. BPF_WRITE is disallowed
			 * for this case.
			 */
		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
		case PTR_TO_MEM | MEM_RDONLY | PTR_UNTRUSTED:
			if (type == BPF_READ) {
				if (BPF_MODE(insn->code) == BPF_MEM)
					insn->code = BPF_LDX | BPF_PROBE_MEM |
						     BPF_SIZE((insn)->code);
				else
					insn->code = BPF_LDX | BPF_PROBE_MEMSX |
						     BPF_SIZE((insn)->code);
				env->prog->aux->num_exentries++;
			}
			continue;
		case PTR_TO_ARENA:
			if (BPF_MODE(insn->code) == BPF_MEMSX) {
				if (!bpf_jit_supports_insn(insn, true)) {
					verbose(env, "sign extending loads from arena are not supported yet\n");
					return -EOPNOTSUPP;
				}
				insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32SX | BPF_SIZE(insn->code);
			} else {
				insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);
			}
			env->prog->aux->num_exentries++;
			continue;
		default:
			continue;
		}

		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
		size = BPF_LDST_BYTES(insn);
		mode = BPF_MODE(insn->code);

		/* If the read access is a narrower load of the field,
		 * convert to a 4/8-byte load, to minimize program type specific
		 * convert_ctx_access changes. If conversion is successful,
		 * we will apply proper mask to the result.
		 */
		is_narrower_load = size < ctx_field_size;
		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
		off = insn->off;
		if (is_narrower_load) {
			u8 size_code;

			if (type == BPF_WRITE) {
				verifier_bug(env, "narrow ctx access misconfigured");
				return -EFAULT;
			}

			size_code = BPF_H;
			if (ctx_field_size == 4)
				size_code = BPF_W;
			else if (ctx_field_size == 8)
				size_code = BPF_DW;

			insn->off = off & ~(size_default - 1);
			insn->code = BPF_LDX | BPF_MEM | size_code;
		}

		target_size = 0;
		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
					 &target_size);
		if (cnt == 0 || cnt >= INSN_BUF_SIZE ||
		    (ctx_field_size && !target_size)) {
			verifier_bug(env, "error during ctx access conversion (%d)", cnt);
			return -EFAULT;
		}

		if (is_narrower_load && size < target_size) {
			u8 shift = bpf_ctx_narrow_access_offset(
				off, size, size_default) * 8;
			if (shift && cnt + 1 >= INSN_BUF_SIZE) {
				verifier_bug(env, "narrow ctx load misconfigured");
				return -EFAULT;
			}
			if (ctx_field_size <= 4) {
				if (shift)
					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
									insn->dst_reg,
									shift);
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
			} else {
				if (shift)
					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
									insn->dst_reg,
									shift);
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1ULL << size * 8) - 1);
			}
		}
		if (mode == BPF_MEMSX)
			insn_buf[cnt++] = BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X,
						       insn->dst_reg, insn->dst_reg,
						       size * 8, 0);

patch_insn_buf:
		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
		if (!new_prog)
			return -ENOMEM;

		delta += cnt - 1;

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn = new_prog->insnsi + i + delta;
	}

	return 0;
}

int bpf_jit_subprogs(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog, **func, *tmp;
	int i, j, subprog_start, subprog_end = 0, len, subprog;
	struct bpf_map *map_ptr;
	struct bpf_insn *insn;
	void *old_bpf_func;
	int err, num_exentries;
	int old_len, subprog_start_adjustment = 0;

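	/* A program with a single subprog has no bpf-to-bpf calls, so there
	 * is nothing to split out and JIT separately.
	 */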
	if (env->subprog_cnt <= 1)
		return 0;

	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
			continue;

		/* Upon error here we cannot fall back to interpreter but
		 * need a hard reject of the program. Thus -EFAULT is
		 * propagated in any case.
		 */
		subprog = bpf_find_subprog(env, i + insn->imm + 1);
		if (verifier_bug_if(subprog < 0, env, "No program to jit at insn %d",
				    i + insn->imm + 1))
			return -EFAULT;
		/* temporarily remember subprog id inside insn instead of
		 * aux_data, since next loop will split up all insns into funcs
		 */
		insn->off = subprog;
		/* remember original imm in case JIT fails and fallback
		 * to interpreter will be needed
		 */
		env->insn_aux_data[i].call_imm = insn->imm;
		/* point imm to __bpf_call_base+1 from JITs point of view */
		insn->imm = 1;
		if (bpf_pseudo_func(insn)) {
#if defined(MODULES_VADDR)
			u64 addr = MODULES_VADDR;
#else
			u64 addr = VMALLOC_START;
#endif
			/* jit (e.g. x86_64) may emit fewer instructions
			 * if it learns a u32 imm is the same as a u64 imm.
			 * Set close enough to possible prog address.
			 */
			insn[0].imm = (u32)addr;
			insn[1].imm = addr >> 32;
		}
	}

	err = bpf_prog_alloc_jited_linfo(prog);
	if (err)
		goto out_undo_insn;

	err = -ENOMEM;
	func = kzalloc_objs(prog, env->subprog_cnt);
	if (!func)
		goto out_undo_insn;

	for (i = 0; i < env->subprog_cnt; i++) {
		subprog_start = subprog_end;
		subprog_end = env->subprog_info[i + 1].start;

		len = subprog_end - subprog_start;
		/* bpf_prog_run() doesn't call subprogs directly,
		 * hence main prog stats include the runtime of subprogs.
		 * subprogs don't have IDs and are not reachable via prog_get_next_id;
		 * func[i]->stats will never be accessed and stays NULL
		 */
		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
		if (!func[i])
			goto out_free;
		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
		       len * sizeof(struct bpf_insn));
		func[i]->type = prog->type;
		func[i]->len = len;
		if (bpf_prog_calc_tag(func[i]))
			goto out_free;
		func[i]->is_func = 1;
		func[i]->sleepable = prog->sleepable;
		func[i]->aux->func_idx = i;
		/* Below members will be freed only at prog->aux */
		func[i]->aux->btf = prog->aux->btf;
		func[i]->aux->subprog_start = subprog_start + subprog_start_adjustment;
		func[i]->aux->func_info = prog->aux->func_info;
		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
		func[i]->aux->poke_tab = prog->aux->poke_tab;
		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
		func[i]->aux->main_prog_aux = prog->aux;

		for (j = 0; j < prog->aux->size_poke_tab; j++) {
			struct bpf_jit_poke_descriptor *poke;

			poke = &prog->aux->poke_tab[j];
			if (poke->insn_idx < subprog_end &&
			    poke->insn_idx >= subprog_start)
				poke->aux = func[i]->aux;
		}

		func[i]->aux->name[0] = 'F';
		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
		if (env->subprog_info[i].priv_stack_mode == PRIV_STACK_ADAPTIVE)
			func[i]->aux->jits_use_priv_stack = true;

		func[i]->jit_requested = 1;
		func[i]->blinding_requested = prog->blinding_requested;
		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
		func[i]->aux->linfo = prog->aux->linfo;
		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
		func[i]->aux->arena = prog->aux->arena;
		func[i]->aux->used_maps = env->used_maps;
		func[i]->aux->used_map_cnt = env->used_map_cnt;
		num_exentries = 0;
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (BPF_CLASS(insn->code) == BPF_LDX &&
			    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
			     BPF_MODE(insn->code) == BPF_PROBE_MEM32 ||
			     BPF_MODE(insn->code) == BPF_PROBE_MEM32SX ||
			     BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
				num_exentries++;
			if ((BPF_CLASS(insn->code) == BPF_STX ||
			     BPF_CLASS(insn->code) == BPF_ST) &&
			    BPF_MODE(insn->code) == BPF_PROBE_MEM32)
				num_exentries++;
			if (BPF_CLASS(insn->code) == BPF_STX &&
			    BPF_MODE(insn->code) == BPF_PROBE_ATOMIC)
				num_exentries++;
		}
		func[i]->aux->num_exentries = num_exentries;
		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
		func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb;
		func[i]->aux->changes_pkt_data = env->subprog_info[i].changes_pkt_data;
		func[i]->aux->might_sleep = env->subprog_info[i].might_sleep;
		func[i]->aux->token = prog->aux->token;
		if (!i)
			func[i]->aux->exception_boundary = env->seen_exception;

		/*
		 * To properly pass the absolute subprog start to the JIT,
		 * all instruction adjustments should be accumulated.
		 */
		old_len = func[i]->len;
		func[i] = bpf_int_jit_compile(func[i]);
		subprog_start_adjustment += func[i]->len - old_len;

		if (!func[i]->jited) {
			err = -ENOTSUPP;
			goto out_free;
		}
		cond_resched();
	}

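	/* Each subprog has been JITed in isolation above; bpf-to-bpf call
	 * sites still hold the placeholder imm set earlier. The passes below
	 * fill in the real callee addresses and re-run the JIT.
	 */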
	/* at this point all bpf functions were successfully JITed
	 * now populate all bpf_calls with correct addresses and
	 * run last pass of JIT
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (bpf_pseudo_func(insn)) {
				subprog = insn->off;
				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
				continue;
			}
			if (!bpf_pseudo_call(insn))
				continue;
			subprog = insn->off;
			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
		}

		/* we use the aux data to keep a list of the start addresses
		 * of the JITed images for each function in the program
		 *
		 * for some architectures, such as powerpc64, the imm field
		 * might not be large enough to hold the offset of the start
		 * address of the callee's JITed image from __bpf_call_base
		 *
		 * in such cases, we can lookup the start address of a callee
		 * by using its subprog id, available from the off field of
		 * the call instruction, as an index for this list
		 */
		func[i]->aux->func = func;
		func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt;
		func[i]->aux->real_func_cnt = env->subprog_cnt;
	}
	for (i = 0; i < env->subprog_cnt; i++) {
		old_bpf_func = func[i]->bpf_func;
		tmp = bpf_int_jit_compile(func[i]);
		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
			err = -ENOTSUPP;
			goto out_free;
		}
		cond_resched();
	}

	/*
	 * Cleanup func[i]->aux fields which aren't required
	 * or can become invalid in future
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		func[i]->aux->used_maps = NULL;
		func[i]->aux->used_map_cnt = 0;
	}

	/* finally lock prog and jit images for all functions and
	 * populate kallsyms. Begin at the first subprogram, since
	 * bpf_prog_load will add the kallsyms for the main program.
	 */
	for (i = 1; i < env->subprog_cnt; i++) {
		err = bpf_prog_lock_ro(func[i]);
		if (err)
			goto out_free;
	}

	for (i = 1; i < env->subprog_cnt; i++)
		bpf_prog_kallsyms_add(func[i]);

	/* Last step: make now unused interpreter insns from main
	 * prog consistent for later dump requests, so they can
	 * later look the same as if they were interpreted only.
	 */
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			insn[0].imm = env->insn_aux_data[i].call_imm;
			insn[1].imm = insn->off;
			insn->off = 0;
			continue;
		}
		if (!bpf_pseudo_call(insn))
			continue;
		insn->off = env->insn_aux_data[i].call_imm;
		subprog = bpf_find_subprog(env, i + insn->off + 1);
		insn->imm = subprog;
	}

	prog->jited = 1;
	prog->bpf_func = func[0]->bpf_func;
	prog->jited_len = func[0]->jited_len;
	prog->aux->extable = func[0]->aux->extable;
	prog->aux->num_exentries = func[0]->aux->num_exentries;
	prog->aux->func = func;
	prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt;
	prog->aux->real_func_cnt = env->subprog_cnt;
	prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func;
	prog->aux->exception_boundary = func[0]->aux->exception_boundary;
	bpf_prog_jit_attempt_done(prog);
	return 0;
out_free:
	/* We failed JIT'ing, so at this point we need to unregister poke
	 * descriptors from subprogs, so that kernel is not attempting to
	 * patch it anymore as we're freeing the subprog JIT memory.
	 */
	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		map_ptr = prog->aux->poke_tab[i].tail_call.map;
		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
	}
	/* At this point we're guaranteed that poke descriptors are not
	 * live anymore. We can just unlink its descriptor table as it's
	 * released with the main prog.
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		if (!func[i])
			continue;
		func[i]->aux->poke_tab = NULL;
		bpf_jit_free(func[i]);
	}
	kfree(func);
out_undo_insn:
	/* cleanup main prog to be interpreted */
	prog->jit_requested = 0;
	prog->blinding_requested = 0;
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (!bpf_pseudo_call(insn))
			continue;
		insn->off = 0;
		insn->imm = env->insn_aux_data[i].call_imm;
	}
	bpf_prog_jit_attempt_done(prog);
	return err;
}

int bpf_fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
	int i, depth;
#endif
	int err = 0;

	if (env->prog->jit_requested &&
	    !bpf_prog_is_offloaded(env->prog->aux)) {
		err = bpf_jit_subprogs(env);
		if (err == 0)
			return 0;
		if (err == -EFAULT)
			return err;
	}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	if (has_kfunc_call) {
		verbose(env, "calling kernel functions are not allowed in non-JITed programs\n");
		return -EINVAL;
	}
	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
		/* When JIT fails the progs with bpf2bpf calls and tail_calls
		 * have to be rejected, since interpreter doesn't support them yet.
		 */
		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
		return -EINVAL;
	}
	for (i = 0; i < prog->len; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			/* When JIT fails the progs with callback calls
			 * have to be rejected, since interpreter doesn't support them yet.
			 */
			verbose(env, "callbacks are not allowed in non-JITed programs\n");
			return -EINVAL;
		}

		if (!bpf_pseudo_call(insn))
			continue;
		depth = get_callee_stack_depth(env, insn, i);
		if (depth < 0)
			return depth;
		bpf_patch_call_args(insn, depth);
	}
	err = 0;
#endif
	return err;
}

/* The function requires that first instruction in 'patch' is insnsi[prog->len - 1] */
static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *patch, int len)
{
	struct bpf_subprog_info *info = env->subprog_info;
	int cnt = env->subprog_cnt;
	struct bpf_prog *prog;

	/* We only reserve one slot for hidden subprogs in subprog_info. */
	if (env->hidden_subprog_cnt) {
		verifier_bug(env, "only one hidden subprog supported");
		return -EFAULT;
	}
	/* We're not patching any existing instruction, just appending the new
	 * ones for the hidden subprog. Hence all of the adjustment operations
	 * in bpf_patch_insn_data are no-ops.
	 */
	prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len);
	if (!prog)
		return -ENOMEM;
	env->prog = prog;
	info[cnt + 1].start = info[cnt].start;
	info[cnt].start = prog->len - len + 1;
	env->subprog_cnt++;
	env->hidden_subprog_cnt++;
	return 0;
}

/* Do various post-verification rewrites in a single program pass.
 * These rewrites simplify JIT and interpreter implementations.
 */
int bpf_do_misc_fixups(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	enum bpf_attach_type eatype = prog->expected_attach_type;
	enum bpf_prog_type prog_type = resolve_prog_type(prog);
	struct bpf_insn *insn = prog->insnsi;
	const struct bpf_func_proto *fn;
	const int insn_cnt = prog->len;
	const struct bpf_map_ops *ops;
	struct bpf_insn_aux_data *aux;
	struct bpf_insn *insn_buf = env->insn_buf;
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
	int i, ret, cnt, delta = 0, cur_subprog = 0;
	struct bpf_subprog_info *subprogs = env->subprog_info;
	u16 stack_depth = subprogs[cur_subprog].stack_depth;
	u16 stack_depth_extra = 0;

	if (env->seen_exception && !env->exception_callback_subprog) {
		struct bpf_insn *patch = insn_buf;

		*patch++ = env->prog->insnsi[insn_cnt - 1];
		*patch++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
		*patch++ = BPF_EXIT_INSN();
		ret = add_hidden_subprog(env, insn_buf, patch - insn_buf);
		if (ret < 0)
			return ret;
		prog = env->prog;
		insn = prog->insnsi;

		env->exception_callback_subprog = env->subprog_cnt - 1;
		/* Don't update insn_cnt, as add_hidden_subprog always appends insns */
		bpf_mark_subprog_exc_cb(env, env->exception_callback_subprog);
	}

	for (i = 0; i < insn_cnt;) {
		if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) {
			if ((insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1) ||
			    (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {
				/* convert to 32-bit mov that clears upper 32-bit */
				insn->code = BPF_ALU | BPF_MOV | BPF_X;
				/* clear off and imm, so it's a normal 'wX = wY' from JIT pov */
				insn->off = 0;
				insn->imm = 0;
			} /* cast from as(0) to as(1) should be handled by JIT */
			goto next_insn;
		}

		if (env->insn_aux_data[i + delta].needs_zext)
			/* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */
			insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code);

		/* Make sdiv/smod divide-by-minus-one exceptions impossible. */
		if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) ||
		     insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) ||
		     insn->code == (BPF_ALU | BPF_MOD | BPF_K) ||
		     insn->code == (BPF_ALU | BPF_DIV | BPF_K)) &&
		    insn->off == 1 && insn->imm == -1) {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
			struct bpf_insn *patch = insn_buf;

			if (isdiv)
				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
							BPF_NEG | BPF_K, insn->dst_reg,
							0, 0, 0);
			else
				*patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);

			cnt = patch - insn_buf;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Make divide-by-zero and divide-by-minus-one exceptions impossible. */
		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
			bool is_sdiv = isdiv && insn->off == 1;
			bool is_smod = !isdiv && insn->off == 1;
			struct bpf_insn *patch = insn_buf;

			if (is_sdiv) {
				/* [R,W]x sdiv 0 -> 0
				 * LLONG_MIN sdiv -1 -> LLONG_MIN
				 * INT_MIN sdiv -1 -> INT_MIN
				 */
				*patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
							BPF_ADD | BPF_K, BPF_REG_AX,
							0, 0, 1);
				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
							BPF_JGT | BPF_K, BPF_REG_AX,
							0, 4, 1);
				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
							BPF_JEQ | BPF_K, BPF_REG_AX,
							0, 1, 0);
				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
							BPF_MOV | BPF_K, insn->dst_reg,
							0, 0, 0);
				/* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */
				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
							BPF_NEG | BPF_K, insn->dst_reg,
							0, 0, 0);
				*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
				*patch++ = *insn;
				cnt = patch - insn_buf;
			} else if (is_smod) {
				/* [R,W]x mod 0 -> [R,W]x */
				/* [R,W]x mod -1 -> 0 */
				*patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
							BPF_ADD | BPF_K, BPF_REG_AX,
							0, 0, 1);
				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
							BPF_JGT | BPF_K, BPF_REG_AX,
							0, 3, 1);
				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
							BPF_JEQ | BPF_K, BPF_REG_AX,
							0, 3 + (is64 ? 0 : 1), 1);
				*patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
				*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
				*patch++ = *insn;

				if (!is64) {
					*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
					*patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
				}
				cnt = patch - insn_buf;
			} else if (isdiv) {
				/* [R,W]x div 0 -> 0 */
				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
							BPF_JNE | BPF_K, insn->src_reg,
							0, 2, 0);
				*patch++ = BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg);
				*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
				*patch++ = *insn;
				cnt = patch - insn_buf;
			} else {
				/* [R,W]x mod 0 -> [R,W]x */
				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
							BPF_JEQ | BPF_K, insn->src_reg,
							0, 1 + (is64 ? 0 : 1), 0);
				*patch++ = *insn;

				if (!is64) {
					*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
					*patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
				}
				cnt = patch - insn_buf;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Make it impossible to de-reference a userspace address */
		if (BPF_CLASS(insn->code) == BPF_LDX &&
		    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
		     BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) {
			struct bpf_insn *patch = insn_buf;
			u64 uaddress_limit = bpf_arch_uaddress_limit();

			if (!uaddress_limit)
				goto next_insn;

			*patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
			if (insn->off)
				*patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, insn->off);
			*patch++ = BPF_ALU64_IMM(BPF_RSH, BPF_REG_AX, 32);
			*patch++ = BPF_JMP_IMM(BPF_JLE, BPF_REG_AX, uaddress_limit >> 32, 2);
			*patch++ = *insn;
			*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
			*patch++ = BPF_MOV64_IMM(insn->dst_reg, 0);

			cnt = patch - insn_buf;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
		if (BPF_CLASS(insn->code) == BPF_LD &&
		    (BPF_MODE(insn->code) == BPF_ABS ||
		     BPF_MODE(insn->code) == BPF_IND)) {
			cnt = env->ops->gen_ld_abs(insn, insn_buf);
			if (cnt == 0 || cnt >= INSN_BUF_SIZE) {
				verifier_bug(env, "%d insns generated for ld_abs", cnt);
				return -EFAULT;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
			struct bpf_insn *patch = insn_buf;
			bool issrc, isneg, isimm;
			u32 off_reg;

			aux = &env->insn_aux_data[i + delta];
			if (!aux->alu_state ||
			    aux->alu_state == BPF_ALU_NON_POINTER)
				goto next_insn;

			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
				BPF_ALU_SANITIZE_SRC;
			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;

			off_reg = issrc ? insn->src_reg : insn->dst_reg;
			if (isimm) {
				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
			} else {
				if (isneg)
					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
			}
			if (!issrc)
				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
			insn->src_reg = BPF_REG_AX;
			if (isneg)
				insn->code = insn->code == code_add ?
					     code_sub : code_add;
			*patch++ = *insn;
			if (issrc && isneg && !isimm)
				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
			cnt = patch - insn_buf;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		if (bpf_is_may_goto_insn(insn) && bpf_jit_supports_timed_may_goto()) {
			int stack_off_cnt = -stack_depth - 16;

			/*
			 * Two 8 byte slots, depth-16 stores the count, and
			 * depth-8 stores the start timestamp of the loop.
			 *
			 * The starting value of count is BPF_MAX_TIMED_LOOPS
			 * (0xffff). Every iteration loads it and subs it by 1,
			 * until the value becomes 0 in AX (thus, 1 in stack),
			 * after which we call arch_bpf_timed_may_goto, which
			 * either sets AX to 0xffff to keep looping, or to 0
			 * upon timeout. AX is then stored into the stack. In
			 * the next iteration, we either see 0 and break out, or
			 * continue iterating until the next time value is 0
			 * after subtraction, rinse and repeat.
			 */
			stack_depth_extra = 16;
			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off_cnt);
			if (insn->off >= 0)
				insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 5);
			else
				insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1);
			insn_buf[2] = BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1);
			insn_buf[3] = BPF_JMP_IMM(BPF_JNE, BPF_REG_AX, 0, 2);
			/*
			 * AX is used as an argument to pass in stack_off_cnt
			 * (to add to r10/fp), and also as the return value of
			 * the call to arch_bpf_timed_may_goto.
			 */
			insn_buf[4] = BPF_MOV64_IMM(BPF_REG_AX, stack_off_cnt);
			insn_buf[5] = BPF_EMIT_CALL(arch_bpf_timed_may_goto);
			insn_buf[6] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off_cnt);
			cnt = 7;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		} else if (bpf_is_may_goto_insn(insn)) {
			int stack_off = -stack_depth - 8;

			stack_depth_extra = 8;
			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off);
			if (insn->off >= 0)
				insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2);
			else
				insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1);
			insn_buf[2] = BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1);
			insn_buf[3] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off);
			cnt = 4;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		if (insn->code != (BPF_JMP | BPF_CALL))
			goto next_insn;
		if (insn->src_reg == BPF_PSEUDO_CALL)
			goto next_insn;
		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
			ret = bpf_fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
			if (ret)
				return ret;
			if (cnt == 0)
				goto next_insn;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Skip inlining the helper call if the JIT does it. */
		if (bpf_jit_inlines_helper_call(insn->imm))
			goto next_insn;

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_override_return)
			prog->kprobe_override = 1;
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			if (!bpf_allow_tail_call_in_subprogs(env))
				prog->aux->stack_depth = MAX_BPF_STACK;
			prog->aux->max_pkt_offset = MAX_PACKET_OFF;

			/* mark bpf_tail_call as different opcode to avoid
			 * conditional branch in the interpreter for every normal
			 * call and to prevent accidental JITing by JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			aux = &env->insn_aux_data[i + delta];
			if (env->bpf_capable && !prog->blinding_requested &&
			    prog->jit_requested &&
			    !bpf_map_key_poisoned(aux) &&
			    !bpf_map_ptr_poisoned(aux) &&
			    !bpf_map_ptr_unpriv(aux)) {
				struct bpf_jit_poke_descriptor desc = {
					.reason = BPF_POKE_REASON_TAIL_CALL,
					.tail_call.map = aux->map_ptr_state.map_ptr,
					.tail_call.key = bpf_map_key_immediate(aux),
					.insn_idx = i + delta,
				};

				ret = bpf_jit_add_poke_descriptor(prog, &desc);
				if (ret < 0) {
					verbose(env, "adding tail call poke descriptor failed\n");
					return ret;
				}

				insn->imm = ret + 1;
				goto next_insn;
			}

			if (!bpf_map_ptr_unpriv(aux))
				goto next_insn;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			if (bpf_map_ptr_poisoned(aux)) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}

			map_ptr = aux->map_ptr_state.map_ptr;
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		if (insn->imm == BPF_FUNC_timer_set_callback) {
			/* The verifier will process callback_fn as many times as necessary
			 * with different maps and the register states prepared by
			 * set_timer_callback_state will be accurate.
			 *
			 * The following use case is valid:
			 * map1 is shared by prog1, prog2, prog3.
			 * prog1 calls bpf_timer_init for some map1 elements
			 * prog2 calls bpf_timer_set_callback for some map1 elements.
			 * Those that were not bpf_timer_init-ed will return -EINVAL.
			 * prog3 calls bpf_timer_start for some map1 elements.
			 * Those that were not both bpf_timer_init-ed and
			 * bpf_timer_set_callback-ed will return -EINVAL.
			 */
			struct bpf_insn ld_addrs[2] = {
				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
			};

			insn_buf[0] = ld_addrs[0];
			insn_buf[1] = ld_addrs[1];
			insn_buf[2] = *insn;
			cnt = 3;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto patch_call_imm;
		}

		/* bpf_per_cpu_ptr() and bpf_this_cpu_ptr() */
		if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) {
			/* patch with 'r1 = *(u64 *)(r1 + 0)' since for percpu data,
			 * bpf_mem_alloc() returns a ptr to the percpu data ptr.
			 */
			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
			insn_buf[1] = *insn;
			cnt = 2;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto patch_call_imm;
		}

		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * and other inlining handlers are currently limited to 64 bit
		 * only.
		 */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    (insn->imm == BPF_FUNC_map_lookup_elem ||
		     insn->imm == BPF_FUNC_map_update_elem ||
		     insn->imm == BPF_FUNC_map_delete_elem ||
		     insn->imm == BPF_FUNC_map_push_elem ||
		     insn->imm == BPF_FUNC_map_pop_elem ||
		     insn->imm == BPF_FUNC_map_peek_elem ||
		     insn->imm == BPF_FUNC_redirect_map ||
		     insn->imm == BPF_FUNC_for_each_map_elem ||
		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
			aux = &env->insn_aux_data[i + delta];
			if (bpf_map_ptr_poisoned(aux))
				goto patch_call_imm;

			map_ptr = aux->map_ptr_state.map_ptr;
			ops = map_ptr->ops;
			if (insn->imm == BPF_FUNC_map_lookup_elem &&
			    ops->map_gen_lookup) {
				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
				if (cnt == -EOPNOTSUPP)
					goto patch_map_ops_generic;
				if (cnt <= 0 || cnt >= INSN_BUF_SIZE) {
					verifier_bug(env, "%d insns generated for map lookup", cnt);
					return -EFAULT;
				}

				new_prog = bpf_patch_insn_data(env, i + delta,
							       insn_buf, cnt);
				if (!new_prog)
					return -ENOMEM;

				delta += cnt - 1;
				env->prog = prog = new_prog;
				insn = new_prog->insnsi + i + delta;
				goto next_insn;
			}

			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
						  (void *(*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
						  (long (*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
						  (long (*)(struct bpf_map *map, void *key, void *value,
							    u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
						  (long (*)(struct bpf_map *map, void *value,
							    u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
						  (long (*)(struct bpf_map *map, void *value))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
						  (long (*)(struct bpf_map *map, void *value))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_redirect,
						  (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
						  (long (*)(struct bpf_map *map,
							    bpf_callback_t callback_fn,
							    void *callback_ctx,
							    u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
						  (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));

patch_map_ops_generic:
			switch (insn->imm) {
			case BPF_FUNC_map_lookup_elem:
				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
				goto next_insn;
			case BPF_FUNC_map_update_elem:
				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
				goto next_insn;
			case BPF_FUNC_map_delete_elem:
				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
				goto next_insn;
			case BPF_FUNC_map_push_elem:
				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
				goto next_insn;
			case BPF_FUNC_map_pop_elem:
				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
				goto next_insn;
			case BPF_FUNC_map_peek_elem:
				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
				goto next_insn;
			case BPF_FUNC_redirect_map:
				insn->imm = BPF_CALL_IMM(ops->map_redirect);
				goto next_insn;
			case BPF_FUNC_for_each_map_elem:
				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
				goto next_insn;
			case BPF_FUNC_map_lookup_percpu_elem:
				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
				goto next_insn;
			}

			goto patch_call_imm;
		}

		/* Implement bpf_jiffies64 inline. */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_jiffies64) {
			struct bpf_insn ld_jiffies_addr[2] = {
				BPF_LD_IMM64(BPF_REG_0,
					     (unsigned long)&jiffies),
			};

			insn_buf[0] = ld_jiffies_addr[0];
			insn_buf[1] = ld_jiffies_addr[1];
			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
						  BPF_REG_0, 0);
			cnt = 3;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
						       cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
		/* Implement bpf_get_smp_processor_id() inline. */
		if (insn->imm == BPF_FUNC_get_smp_processor_id &&
		    bpf_verifier_inlines_helper_call(env, insn->imm)) {
			/* BPF_FUNC_get_smp_processor_id inlining is an
			 * optimization, so if cpu_number is ever
			 * changed in some incompatible and hard to support
			 * way, it's fine to back out this inlining logic
			 */
#ifdef CONFIG_SMP
			insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&cpu_number);
			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
			insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
			cnt = 3;
#else
			insn_buf[0] = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
			cnt = 1;
#endif
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Implement bpf_get_current_task() and bpf_get_current_task_btf() inline. */
		if ((insn->imm == BPF_FUNC_get_current_task || insn->imm == BPF_FUNC_get_current_task_btf) &&
		    bpf_verifier_inlines_helper_call(env, insn->imm)) {
			insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&current_task);
			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
			cnt = 3;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
#endif
		/* Implement bpf_get_func_arg inline.
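		 * The patched sequence loads the argument count into r0,
		 * rejects an out-of-range index in r2 with -EINVAL, and
		 * otherwise copies ctx[index] to the destination pointed to
		 * by r3 and returns 0.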
		 */
		if (prog_type == BPF_PROG_TYPE_TRACING &&
		    insn->imm == BPF_FUNC_get_func_arg) {
			if (eatype == BPF_TRACE_RAW_TP) {
				int nr_args = btf_type_vlen(prog->aux->attach_func_proto);

				/* skip 'void *__data' in btf_trace_##name() and save to reg0 */
				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, nr_args - 1);
				cnt = 1;
			} else {
				/* Load nr_args from ctx - 8 */
				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
				insn_buf[1] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xFF);
				cnt = 2;
			}
			insn_buf[cnt++] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
			insn_buf[cnt++] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
			insn_buf[cnt++] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
			insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
			insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
			insn_buf[cnt++] = BPF_MOV64_IMM(BPF_REG_0, 0);
			insn_buf[cnt++] = BPF_JMP_A(1);
			insn_buf[cnt++] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Implement bpf_get_func_ret inline. */
		if (prog_type == BPF_PROG_TYPE_TRACING &&
		    insn->imm == BPF_FUNC_get_func_ret) {
			if (eatype == BPF_TRACE_FEXIT ||
			    eatype == BPF_TRACE_FSESSION ||
			    eatype == BPF_MODIFY_RETURN) {
				/* Load nr_args from ctx - 8 */
				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
				insn_buf[1] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xFF);
				insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
				insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
				insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
				insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
				insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
				cnt = 7;
			} else {
				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
				cnt = 1;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Implement get_func_arg_cnt inline. */
		if (prog_type == BPF_PROG_TYPE_TRACING &&
		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
			if (eatype == BPF_TRACE_RAW_TP) {
				int nr_args = btf_type_vlen(prog->aux->attach_func_proto);

				/* skip 'void *__data' in btf_trace_##name() and save to reg0 */
				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, nr_args - 1);
				cnt = 1;
			} else {
				/* Load nr_args from ctx - 8 */
				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
				insn_buf[1] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xFF);
				cnt = 2;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Implement bpf_get_func_ip inline.
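		 * The BPF trampoline stores the traced function's IP at
		 * ctx - 16, so the helper reduces to a single 8-byte load.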
		 */
		if (prog_type == BPF_PROG_TYPE_TRACING &&
		    insn->imm == BPF_FUNC_get_func_ip) {
			/* Load IP address from ctx - 16 */
			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
			if (!new_prog)
				return -ENOMEM;

			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Implement bpf_get_branch_snapshot inline. */
		if (IS_ENABLED(CONFIG_PERF_EVENTS) &&
		    prog->jit_requested && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_get_branch_snapshot) {
			/* We are dealing with the following func protos:
			 * u64 bpf_get_branch_snapshot(void *buf, u32 size, u64 flags);
			 * int perf_snapshot_branch_stack(struct perf_branch_entry *entries, u32 cnt);
			 */
			const u32 br_entry_size = sizeof(struct perf_branch_entry);

			/* struct perf_branch_entry is part of UAPI and is
			 * used as an array element, so extremely unlikely to
			 * ever grow or shrink
			 */
			BUILD_BUG_ON(br_entry_size != 24);

			/* if (unlikely(flags)) return -EINVAL */
			insn_buf[0] = BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 0, 7);

			/* Transform size (bytes) into number of entries (cnt = size / 24).
			 * But to avoid expensive division instruction, we implement
			 * divide-by-3 through multiplication, followed by further
			 * division by 8 through 3-bit right shift.
			 * Refer to book "Hacker's Delight, 2nd ed." by Henry S. Warren, Jr.,
			 * p. 227, chapter "Unsigned Division by 3" for details and proofs.
			 *
			 * N / 3 <=> M * N / 2^33, where M = (2^33 + 1) / 3 = 0xaaaaaaab.
			 */
			insn_buf[1] = BPF_MOV32_IMM(BPF_REG_0, 0xaaaaaaab);
			insn_buf[2] = BPF_ALU64_REG(BPF_MUL, BPF_REG_2, BPF_REG_0);
			insn_buf[3] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36);

			/* call perf_snapshot_branch_stack implementation */
			insn_buf[4] = BPF_EMIT_CALL(static_call_query(perf_snapshot_branch_stack));
			/* if (entry_cnt == 0) return -ENOENT */
			insn_buf[5] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4);
			/* return entry_cnt * sizeof(struct perf_branch_entry) */
			insn_buf[6] = BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, br_entry_size);
			insn_buf[7] = BPF_JMP_A(3);
			/* return -EINVAL; */
			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
			insn_buf[9] = BPF_JMP_A(1);
			/* return -ENOENT; */
			insn_buf[10] = BPF_MOV64_IMM(BPF_REG_0, -ENOENT);
			cnt = 11;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}

		/* Implement bpf_kptr_xchg inline */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_kptr_xchg &&
		    bpf_jit_supports_ptr_xchg()) {
			insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
			insn_buf[1] = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
			cnt = 2;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have a prototype and that the verifier
		 * allowed programs to call must be real in-kernel functions
		 */
		if (!fn->func) {
			verifier_bug(env,
				     "not inlined function %s#%d is missing func",
				     func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
next_insn:
		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
			subprogs[cur_subprog].stack_depth += stack_depth_extra;
			subprogs[cur_subprog].stack_extra = stack_depth_extra;

			stack_depth = subprogs[cur_subprog].stack_depth;
			if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) {
				verbose(env, "stack size %d(extra %d) is too large\n",
					stack_depth, stack_depth_extra);
				return -EINVAL;
			}
			cur_subprog++;
			stack_depth = subprogs[cur_subprog].stack_depth;
			stack_depth_extra = 0;
		}
		i++;
		insn++;
	}

	env->prog->aux->stack_depth = subprogs[0].stack_depth;
	for (i = 0; i < env->subprog_cnt; i++) {
		int delta = bpf_jit_supports_timed_may_goto() ? 2 : 1;
		int subprog_start = subprogs[i].start;
		int stack_slots = subprogs[i].stack_extra / 8;
		int slots = delta, cnt = 0;

		if (!stack_slots)
			continue;
		/* We need two slots in case timed may_goto is supported. */
		if (stack_slots > slots) {
			verifier_bug(env, "stack_slots supports may_goto only");
			return -EFAULT;
		}

		stack_depth = subprogs[i].stack_depth;
		if (bpf_jit_supports_timed_may_goto()) {
			insn_buf[cnt++] = BPF_ST_MEM(BPF_DW, BPF_REG_FP, -stack_depth,
						     BPF_MAX_TIMED_LOOPS);
			insn_buf[cnt++] = BPF_ST_MEM(BPF_DW, BPF_REG_FP, -stack_depth + 8, 0);
		} else {
			/* Add ST insn to subprog prologue to init extra stack */
			insn_buf[cnt++] = BPF_ST_MEM(BPF_DW, BPF_REG_FP, -stack_depth,
						     BPF_MAX_LOOPS);
		}
		/* Copy first actual insn to preserve it */
		insn_buf[cnt++] = env->prog->insnsi[subprog_start];

		new_prog = bpf_patch_insn_data(env, subprog_start, insn_buf, cnt);
		if (!new_prog)
			return -ENOMEM;
		env->prog = prog = new_prog;
		/*
		 * If may_goto is the first insn of a prog there could be a jmp
		 * insn that points to it, hence adjust all such jmps to point
		 * to the insn after the BPF_ST that inits the may_goto count.
		 * Adjustment will succeed because bpf_patch_insn_data() didn't fail.
		 */
		WARN_ON(adjust_jmp_off(env->prog, subprog_start, delta));
	}

	/* Since poke tab is now finalized, publish aux to tracker.
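	 * Registering prog->aux with each tail_call map lets the map
	 * implementation update the poke descriptors (and thus the JITed
	 * direct jumps) whenever the corresponding map slots change, so a
	 * map without the poke callbacks is a verifier bug here.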
	 */
	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		map_ptr = prog->aux->poke_tab[i].tail_call.map;
		if (!map_ptr->ops->map_poke_track ||
		    !map_ptr->ops->map_poke_untrack ||
		    !map_ptr->ops->map_poke_run) {
			verifier_bug(env, "poke tab is misconfigured");
			return -EFAULT;
		}

		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
		if (ret < 0) {
			verbose(env, "tracking tail call prog failed\n");
			return ret;
		}
	}

	ret = sort_kfunc_descs_by_imm_off(env);
	if (ret)
		return ret;

	return 0;
}

static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
					int position,
					s32 stack_base,
					u32 callback_subprogno,
					u32 *total_cnt)
{
	s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
	s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
	s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
	int reg_loop_max = BPF_REG_6;
	int reg_loop_cnt = BPF_REG_7;
	int reg_loop_ctx = BPF_REG_8;

	struct bpf_insn *insn_buf = env->insn_buf;
	struct bpf_prog *new_prog;
	u32 callback_start;
	u32 call_insn_offset;
	s32 callback_offset;
	u32 cnt = 0;

	/* This represents an inlined version of bpf_iter.c:bpf_loop,
	 * be careful to modify this code in sync.
	 */

	/* Return error and jump to the end of the patch if
	 * expected number of iterations is too big.
	 */
	insn_buf[cnt++] = BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2);
	insn_buf[cnt++] = BPF_MOV32_IMM(BPF_REG_0, -E2BIG);
	insn_buf[cnt++] = BPF_JMP_IMM(BPF_JA, 0, 0, 16);
	/* spill R6, R7, R8 to use these as loop vars */
	insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset);
	insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset);
	insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset);
	/* initialize loop vars */
	insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_max, BPF_REG_1);
	insn_buf[cnt++] = BPF_MOV32_IMM(reg_loop_cnt, 0);
	insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3);
	/* loop header,
	 * if reg_loop_cnt >= reg_loop_max skip the loop body
	 */
	insn_buf[cnt++] = BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5);
	/* callback call,
	 * correct callback offset would be set after patching
	 */
	insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt);
	insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx);
	insn_buf[cnt++] = BPF_CALL_REL(0);
	/* increment loop counter */
	insn_buf[cnt++] = BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1);
	/* jump to loop header if callback returned 0 */
	insn_buf[cnt++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6);
	/* return value of bpf_loop,
	 * set R0 to the number of iterations
	 */
	insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt);
	/* restore original values of R6, R7, R8 */
	insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset);
	insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset);
	insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset);

	*total_cnt = cnt;
	new_prog = bpf_patch_insn_data(env, position, insn_buf, cnt);
	if (!new_prog)
		return new_prog;

	/* callback start is known only after patching */
	callback_start = env->subprog_info[callback_subprogno].start;
	/* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */
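	/* BPF_CALL_REL offsets are relative to the instruction that follows
	 * the call, hence the extra -1 when computing callback_offset.
	 */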
	call_insn_offset = position + 12;
	callback_offset = callback_start - call_insn_offset - 1;
	new_prog->insnsi[call_insn_offset].imm = callback_offset;

	return new_prog;
}

static bool is_bpf_loop_call(struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
		insn->src_reg == 0 &&
		insn->imm == BPF_FUNC_loop;
}

/* For all sub-programs in the program (including main) check
 * insn_aux_data to see if there are bpf_loop calls that require
 * inlining. If such calls are found, they are replaced with a
 * sequence of instructions produced by the `inline_bpf_loop` function
 * and the subprog stack_depth is increased by the size of 3 registers.
 * This stack space is used to spill the values of R6, R7 and R8, which
 * are used to store the loop bound, counter and context variables.
 */
int bpf_optimize_bpf_loop(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *subprogs = env->subprog_info;
	int i, cur_subprog = 0, cnt, delta = 0;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	u16 stack_depth = subprogs[cur_subprog].stack_depth;
	u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
	u16 stack_depth_extra = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		struct bpf_loop_inline_state *inline_state =
			&env->insn_aux_data[i + delta].loop_inline_state;

		if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
			struct bpf_prog *new_prog;

			stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
			new_prog = inline_bpf_loop(env,
						   i + delta,
						   -(stack_depth + stack_depth_extra),
						   inline_state->callback_subprogno,
						   &cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = new_prog;
			insn = new_prog->insnsi + i + delta;
		}

		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
			subprogs[cur_subprog].stack_depth += stack_depth_extra;
			cur_subprog++;
			stack_depth = subprogs[cur_subprog].stack_depth;
			stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
			stack_depth_extra = 0;
		}
	}

	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;

	return 0;
}

/* Remove unnecessary spill/fill pairs that are members of the fastcall
 * pattern, and adjust subprogram stack depth when possible.
 */
int bpf_remove_fastcall_spills_fills(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	u32 spills_num;
	bool modified = false;
	int i, j;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (aux[i].fastcall_spills_num > 0) {
			spills_num = aux[i].fastcall_spills_num;
			/* NOPs would be removed by opt_remove_nops() */
			for (j = 1; j <= spills_num; ++j) {
				*(insn - j) = NOP;
				*(insn + j) = NOP;
			}
			modified = true;
		}
		if ((subprog + 1)->start == i + 1) {
			if (modified && !subprog->keep_fastcall_stack)
				subprog->stack_depth = -subprog->fastcall_stack_off;
			subprog++;
			modified = false;
		}
	}

	return 0;
}