// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/bpf.h>

#include <asm/kprobes.h>
#include <asm/text-patching.h>

#include "bpf_jit.h"

/* These offsets are from bpf prog end and stay the same across progs */
static int bpf_jit_ool_stub, bpf_jit_long_branch_stub;

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}

void dummy_tramp(void);

asm (
"	.pushsection .text, \"ax\", @progbits	;"
"	.global dummy_tramp			;"
"	.type dummy_tramp, @function		;"
"dummy_tramp:					;"
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
"	blr					;"
#else
/* LR is always in r11, so we don't need a 'mflr r11' here */
"	mtctr	11				;"
"	mtlr	0				;"
"	bctr					;"
#endif
"	.size dummy_tramp, .-dummy_tramp	;"
"	.popsection				;"
);

void bpf_jit_build_fentry_stubs(u32 *image, struct codegen_context *ctx)
{
	int ool_stub_idx, long_branch_stub_idx;

	/*
	 * Out-of-line stub:
	 *	mflr	r0
	 *	[b|bl]	tramp
	 *	mtlr	r0 // only with CONFIG_PPC_FTRACE_OUT_OF_LINE
	 *	b	bpf_func + 4
	 */
	ool_stub_idx = ctx->idx;
	EMIT(PPC_RAW_MFLR(_R0));
	EMIT(PPC_RAW_NOP());
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
		EMIT(PPC_RAW_MTLR(_R0));
	WARN_ON_ONCE(!is_offset_in_branch_range(4 - (long)ctx->idx * 4));
	EMIT(PPC_RAW_BRANCH(4 - (long)ctx->idx * 4));

	/*
	 * Long branch stub:
	 *	.long <dummy_tramp_addr>
	 *	mflr	r11
	 *	bcl	20,31,$+4
	 *	mflr	r12
	 *	ld	r12, -8-SZL(r12)
	 *	mtctr	r12
	 *	mtlr	r11 // needed to retain ftrace ABI
	 *	bctr
	 */
	if (image)
		*((unsigned long *)&image[ctx->idx]) = (unsigned long)dummy_tramp;
	ctx->idx += SZL / 4;
	long_branch_stub_idx = ctx->idx;
	EMIT(PPC_RAW_MFLR(_R11));
	EMIT(PPC_RAW_BCL4());
	EMIT(PPC_RAW_MFLR(_R12));
	EMIT(PPC_RAW_LL(_R12, _R12, -8-SZL));
	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_MTLR(_R11));
	EMIT(PPC_RAW_BCTR());

	if (!bpf_jit_ool_stub) {
		bpf_jit_ool_stub = (ctx->idx - ool_stub_idx) * 4;
		bpf_jit_long_branch_stub = (ctx->idx - long_branch_stub_idx) * 4;
	}
}

int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
{
	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
		PPC_JMP(exit_addr);
	} else if (ctx->alt_exit_addr) {
		if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
			return -1;
		PPC_JMP(ctx->alt_exit_addr);
	} else {
		ctx->alt_exit_addr = ctx->idx * 4;
		bpf_jit_build_epilogue(image, ctx);
	}

	return 0;
}

struct powerpc_jit_data {
	/* address of rw header */
	struct bpf_binary_header *hdr;
	/* address of ro final header */
	struct bpf_binary_header *fhdr;
	u32 *addrs;
	u8 *fimage;
	u32 proglen;
	struct codegen_context ctx;
};

bool bpf_jit_needs_zext(void)
{
	return true;
}
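/*
 * Main JIT entry point: a scouting pass (pass 0) sizes the image and records
 * the features seen (and is repeated when tail calls are present or the image
 * is too large for direct branches), followed by two real code generation
 * passes into a writable image that is then patched into its read-only final
 * location. For subprograms (fp->is_func), a later extra pass fills in the
 * final call addresses.
 */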
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *fhdr = NULL;
	struct bpf_binary_header *hdr = NULL;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;
	u8 *fimage = NULL;
	u32 *fcode_base;
	u32 extable_len;
	u32 fixup_len;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		/*
		 * JIT compiled to a writable location (image/code_base) first.
		 * It is then moved to the readonly final location (fimage/fcode_base)
		 * using instruction patching.
		 */
		fimage = jit_data->fimage;
		fhdr = jit_data->fhdr;
		proglen = jit_data->proglen;
		hdr = jit_data->hdr;
		image = (void *)hdr + ((void *)fimage - (void *)fhdr);
		extra_pass = true;
		/* During extra pass, ensure index is reset before repopulating extable entries */
		cgctx.exentry_idx = 0;
		goto skip_init_ctx;
	}

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));
	bpf_jit_init_reg_mapping(&cgctx);

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * If we have seen a tail call, we need a second pass.
	 * This is because bpf_jit_emit_common_epilogue() is called
	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
	 * We also need a second pass if we ended up with too large
	 * a program so as to ensure BPF_EXIT branches are in range.
	 */
	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
		cgctx.idx = 0;
		if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
			fp = org_fp;
			goto out_addrs;
		}
	}

	bpf_jit_realloc_regs(&cgctx);
	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(NULL, &cgctx);
	addrs[fp->len] = cgctx.idx * 4;
	bpf_jit_build_epilogue(NULL, &cgctx);

	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;

	fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
					 bpf_jit_fill_ill_insns);
	if (!fhdr) {
		fp = org_fp;
		goto out_addrs;
	}

	if (extable_len)
		fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
	fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		cgctx.alt_exit_addr = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
				       extra_pass)) {
			bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
			bpf_jit_binary_pack_free(fhdr, hdr);
			fp = org_fp;
			goto out_addrs;
		}
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef CONFIG_PPC64_ELF_ABI_V1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)fcode_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)fimage;
	fp->jited = 1;
	fp->jited_len = cgctx.idx * 4 + FUNCTION_DESCR_SIZE;

	if (!fp->is_func || extra_pass) {
		if (bpf_jit_binary_pack_finalize(fhdr, hdr)) {
			fp = org_fp;
			goto out_addrs;
		}
		bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->fimage = fimage;
		jit_data->fhdr = fhdr;
		jit_data->hdr = hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/*
 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
 * this function, as this only applies to BPF_PROBE_MEM, for now.
 */
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
			  struct codegen_context *ctx, int insn_idx, int jmp_off,
			  int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex, *ex_entry;
	u32 *fixup;

	/* Populate extable entries only in the last pass */
	if (pass != 2)
		return 0;

	if (!fp->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
		return -EINVAL;

	/*
	 * Program is first written to image before copying to the
	 * final location (fimage). Accordingly, update in the image first.
	 * As all offsets used are relative, copying as is to the
	 * final location should be alright.
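	 *
	 * The fixup entries and the extable sit past the program image:
	 *   [ function descriptor ][ prog ][ fixup entries ][ extable ]
	 * (see the alloclen computation in bpf_int_jit_compile() above).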
	 */
	pc = (unsigned long)&image[insn_idx];
	ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;

	fixup = (void *)ex -
		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);

	fixup[0] = PPC_RAW_LI(dst_reg, 0);
	if (IS_ENABLED(CONFIG_PPC32))
		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */

	fixup[BPF_FIXUP_LEN - 1] =
		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);

	ex_entry = &ex[ctx->exentry_idx];

	offset = pc - (long)&ex_entry->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex_entry->insn = offset;

	offset = (long)fixup - (long)&ex_entry->fixup;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex_entry->fixup = offset;

	ctx->exentry_idx++;
	return 0;
}

void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	int err;

	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&text_mutex);
	err = patch_instructions(dst, src, len, false);
	mutex_unlock(&text_mutex);

	return err ? ERR_PTR(err) : dst;
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	u32 insn = BREAKPOINT_INSTRUCTION;
	int ret;

	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
		return -EINVAL;

	mutex_lock(&text_mutex);
	ret = patch_instructions(dst, &insn, len, true);
	mutex_unlock(&text_mutex);

	return ret;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct powerpc_jit_data *jit_data = fp->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
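		 *
		 * jit_data is still set only when the first pass completed
		 * for a subprog but the extra pass never ran; see
		 * bpf_int_jit_compile() above.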
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(jit_data->fhdr, jit_data->hdr);
			kvfree(jit_data->addrs);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(fp);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

bool bpf_jit_supports_far_kfunc_call(void)
{
	return IS_ENABLED(CONFIG_PPC64);
}

void *arch_alloc_bpf_trampoline(unsigned int size)
{
	return bpf_prog_pack_alloc(size, bpf_jit_fill_ill_insns);
}

void arch_free_bpf_trampoline(void *image, unsigned int size)
{
	bpf_prog_pack_free(image, size);
}

int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
	return 0;
}

static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ctx,
			   struct bpf_tramp_link *l, int regs_off, int retval_off,
			   int run_ctx_off, bool save_ret)
{
	struct bpf_prog *p = l->link.prog;
	ppc_inst_t branch_insn;
	u32 jmp_idx;
	int ret = 0;

	/* Save cookie */
	if (IS_ENABLED(CONFIG_PPC64)) {
		PPC_LI64(_R3, l->cookie);
		EMIT(PPC_RAW_STD(_R3, _R1, run_ctx_off + offsetof(struct bpf_tramp_run_ctx,
								  bpf_cookie)));
	} else {
		PPC_LI32(_R3, l->cookie >> 32);
		PPC_LI32(_R4, l->cookie);
		EMIT(PPC_RAW_STW(_R3, _R1,
				 run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie)));
		EMIT(PPC_RAW_STW(_R4, _R1,
				 run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie) + 4));
	}

	/* __bpf_prog_enter(p, &bpf_tramp_run_ctx) */
	PPC_LI_ADDR(_R3, p);
	EMIT(PPC_RAW_MR(_R25, _R3));
	EMIT(PPC_RAW_ADDI(_R4, _R1, run_ctx_off));
	ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
					 (unsigned long)bpf_trampoline_enter(p));
	if (ret)
		return ret;

	/* Remember prog start time returned by __bpf_prog_enter */
	EMIT(PPC_RAW_MR(_R26, _R3));

	/*
	 * if (__bpf_prog_enter(p) == 0)
	 *	goto skip_exec_of_prog;
	 *
	 * Emit a nop to be later patched with conditional branch, once offset is known
	 */
	EMIT(PPC_RAW_CMPLI(_R3, 0));
	jmp_idx = ctx->idx;
	EMIT(PPC_RAW_NOP());

	/* p->bpf_func(ctx) */
	EMIT(PPC_RAW_ADDI(_R3, _R1, regs_off));
	if (!p->jited)
		PPC_LI_ADDR(_R4, (unsigned long)p->insnsi);
	/* Account for max possible instructions during dummy pass for size calculation */
	if (image && !create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx],
				    (unsigned long)p->bpf_func,
				    BRANCH_SET_LINK)) {
		image[ctx->idx] = ppc_inst_val(branch_insn);
		ctx->idx++;
	} else {
		EMIT(PPC_RAW_LL(_R12, _R25, offsetof(struct bpf_prog, bpf_func)));
		EMIT(PPC_RAW_MTCTR(_R12));
		EMIT(PPC_RAW_BCTRL());
	}

	if (save_ret)
		EMIT(PPC_RAW_STL(_R3, _R1, retval_off));

	/* Fix up branch */
	if (image) {
		if (create_cond_branch(&branch_insn, &image[jmp_idx],
				       (unsigned long)&image[ctx->idx], COND_EQ << 16))
			return -EINVAL;
		image[jmp_idx] = ppc_inst_val(branch_insn);
	}

	/* __bpf_prog_exit(p, start_time, &bpf_tramp_run_ctx) */
	EMIT(PPC_RAW_MR(_R3, _R25));
	EMIT(PPC_RAW_MR(_R4, _R26));
	EMIT(PPC_RAW_ADDI(_R5, _R1, run_ctx_off));
	ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
					 (unsigned long)bpf_trampoline_exit(p));

	return ret;
}

static int invoke_bpf_mod_ret(u32 *image, u32 *ro_image, struct codegen_context *ctx,
			      struct bpf_tramp_links *tl, int regs_off, int retval_off,
			      int run_ctx_off, u32 *branches)
{
	int i;

	/*
	 * The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	EMIT(PPC_RAW_LI(_R3, 0));
	EMIT(PPC_RAW_STL(_R3, _R1, retval_off));
	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(image, ro_image, ctx, tl->links[i], regs_off, retval_off,
				    run_ctx_off, true))
			return -EINVAL;

		/*
		 * mod_ret prog stored return value after prog ctx. Emit:
		 * if (*(u64 *)(ret_val) != 0)
		 *	goto do_fexit;
		 */
		EMIT(PPC_RAW_LL(_R3, _R1, retval_off));
		EMIT(PPC_RAW_CMPLI(_R3, 0));

		/*
		 * Save the location of the branch and generate a nop, which is
		 * replaced with a conditional jump once do_fexit (i.e. the
		 * start of the fexit invocation) is finalized.
		 */
		branches[i] = ctx->idx;
		EMIT(PPC_RAW_NOP());
	}

	return 0;
}

static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_context *ctx,
					       int func_frame_offset, int r4_off)
{
	if (IS_ENABLED(CONFIG_PPC64)) {
		/* See bpf_jit_stack_tailcallcnt() */
		int tailcallcnt_offset = 6 * 8;

		EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
		EMIT(PPC_RAW_STL(_R3, _R1, -tailcallcnt_offset));
	} else {
		/* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
		EMIT(PPC_RAW_LL(_R4, _R1, r4_off));
	}
}

static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_context *ctx,
						 int func_frame_offset, int r4_off)
{
	if (IS_ENABLED(CONFIG_PPC64)) {
		/* See bpf_jit_stack_tailcallcnt() */
		int tailcallcnt_offset = 6 * 8;

		EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset));
		EMIT(PPC_RAW_STL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
	} else {
		/* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
		EMIT(PPC_RAW_STL(_R4, _R1, r4_off));
	}
}

static void bpf_trampoline_save_args(u32 *image, struct codegen_context *ctx, int func_frame_offset,
				     int nr_regs, int regs_off)
{
	int param_save_area_offset;

	param_save_area_offset = func_frame_offset; /* the two frames we allotted */
	param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */

	for (int i = 0; i < nr_regs; i++) {
		if (i < 8) {
			EMIT(PPC_RAW_STL(_R3 + i, _R1, regs_off + i * SZL));
		} else {
			EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL));
			EMIT(PPC_RAW_STL(_R3, _R1, regs_off + i * SZL));
		}
	}
}

/* Used when restoring just the register parameters when returning back */
static void bpf_trampoline_restore_args_regs(u32 *image, struct codegen_context *ctx,
					     int nr_regs, int regs_off)
{
	for (int i = 0; i < nr_regs && i < 8; i++)
		EMIT(PPC_RAW_LL(_R3 + i, _R1, regs_off + i * SZL));
}

/* Used when we call into the traced function. Replicate parameter save area */
static void bpf_trampoline_restore_args_stack(u32 *image, struct codegen_context *ctx,
					      int func_frame_offset, int nr_regs, int regs_off)
{
	int param_save_area_offset;

	param_save_area_offset = func_frame_offset; /* the two frames we allotted */
	param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */

	for (int i = 8; i < nr_regs; i++) {
		EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL));
		EMIT(PPC_RAW_STL(_R3, _R1, STACK_FRAME_MIN_SIZE + i * SZL));
	}
	bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off);
}

static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
					 void *rw_image_end, void *ro_image,
					 const struct btf_func_model *m, u32 flags,
					 struct bpf_tramp_links *tlinks,
					 void *func_addr)
{
	int regs_off, nregs_off, ip_off, run_ctx_off, retval_off, nvr_off, alt_lr_off, r4_off = 0;
	int i, ret, nr_regs, bpf_frame_size = 0, bpf_dummy_frame_size = 0, func_frame_offset;
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct codegen_context codegen_ctx, *ctx;
	u32 *image = (u32 *)rw_image;
	ppc_inst_t branch_insn;
	u32 *branches = NULL;
	bool save_ret;

	if (IS_ENABLED(CONFIG_PPC32))
		return -EOPNOTSUPP;

	nr_regs = m->nr_args;
	/* Extra registers for struct arguments */
	for (i = 0; i < m->nr_args; i++)
		if (m->arg_size[i] > SZL)
			nr_regs += round_up(m->arg_size[i], SZL) / SZL - 1;

	if (nr_regs > MAX_BPF_FUNC_ARGS)
		return -EOPNOTSUPP;

	ctx = &codegen_ctx;
	memset(ctx, 0, sizeof(*ctx));

	/*
	 * Generated stack layout:
	 *
	 * func prev back chain         [ back chain        ]
	 *                              [                   ]
	 * bpf prog redzone/tailcallcnt [ ...               ] 64 bytes (64-bit powerpc)
	 *                              [                   ] --
	 * LR save area                 [ r0 save (64-bit)  ]  | header
	 *                              [ r0 save (32-bit)  ]  |
	 * dummy frame for unwind       [ back chain 1      ] --
	 *                              [ padding           ] align stack frame
	 *       r4_off                 [ r4 (tailcallcnt)  ] optional - 32-bit powerpc
	 *       alt_lr_off             [ real lr (ool stub)] optional - actual lr
	 *                              [ r26               ]
	 *       nvr_off                [ r25               ] nvr save area
	 *       retval_off             [ return value      ]
	 *                              [ reg argN          ]
	 *                              [ ...               ]
	 *       regs_off               [ reg_arg1          ] prog ctx context
	 *       nregs_off              [ args count        ]
	 *       ip_off                 [ traced function   ]
	 *                              [ ...               ]
	 *       run_ctx_off            [ bpf_tramp_run_ctx ]
	 *                              [ reg argN          ]
	 *                              [ ...               ]
	 *       param_save_area        [ reg_arg1          ] min 8 doublewords, per ABI
	 *                              [ TOC save (64-bit) ] --
	 *                              [ LR save (64-bit)  ]  | header
	 *                              [ LR save (32-bit)  ]  |
	 * bpf trampoline frame         [ back chain 2      ] --
	 *
	 */

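	/*
	 * func_frame_offset below is the distance from the trampoline's stack
	 * pointer (back chain 2) back to the stack pointer at entry (the
	 * "func prev back chain" above), i.e. the dummy frame plus our own
	 * frame. It is used to reach the parameter save area and the tail
	 * call count area shown above, and to tear down both frames in the
	 * epilogue.
	 */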
	/* Minimum stack frame header */
	bpf_frame_size = STACK_FRAME_MIN_SIZE;

	/*
	 * Room for parameter save area.
	 *
	 * As per the ABI, this is required if we call into the traced
	 * function (BPF_TRAMP_F_CALL_ORIG):
	 * - if the function takes more than 8 arguments for the rest to spill onto the stack
	 * - or, if the function has variadic arguments
	 * - or, if this function's prototype was not available to the caller
	 *
	 * Reserve space for at least 8 registers for now. This can be optimized later.
	 */
	bpf_frame_size += (nr_regs > 8 ? nr_regs : 8) * SZL;

	/* Room for struct bpf_tramp_run_ctx */
	run_ctx_off = bpf_frame_size;
	bpf_frame_size += round_up(sizeof(struct bpf_tramp_run_ctx), SZL);

	/* Room for IP address argument */
	ip_off = bpf_frame_size;
	if (flags & BPF_TRAMP_F_IP_ARG)
		bpf_frame_size += SZL;

	/* Room for args count */
	nregs_off = bpf_frame_size;
	bpf_frame_size += SZL;

	/* Room for args */
	regs_off = bpf_frame_size;
	bpf_frame_size += nr_regs * SZL;

	/* Room for return value of func_addr or fentry prog */
	retval_off = bpf_frame_size;
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		bpf_frame_size += SZL;

	/* Room for nvr save area */
	nvr_off = bpf_frame_size;
	bpf_frame_size += 2 * SZL;

	/* Optional save area for actual LR in case of ool ftrace */
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
		alt_lr_off = bpf_frame_size;
		bpf_frame_size += SZL;
	}

	if (IS_ENABLED(CONFIG_PPC32)) {
		if (nr_regs < 2) {
			r4_off = bpf_frame_size;
			bpf_frame_size += SZL;
		} else {
			r4_off = regs_off + SZL;
		}
	}

	/* Padding to align stack frame, if any */
	bpf_frame_size = round_up(bpf_frame_size, SZL * 2);

	/* Dummy frame size for proper unwind - includes 64-bytes red zone for 64-bit powerpc */
	bpf_dummy_frame_size = STACK_FRAME_MIN_SIZE + 64;

	/* Offset to the traced function's stack frame */
	func_frame_offset = bpf_dummy_frame_size + bpf_frame_size;

	/* Create dummy frame for unwind, store original return value */
	EMIT(PPC_RAW_STL(_R0, _R1, PPC_LR_STKOFF));
	/* Protect red zone where tail call count goes */
	EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_dummy_frame_size));

	/* Create our stack frame */
	EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_frame_size));

	/* 64-bit: Save TOC and load kernel TOC */
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
		EMIT(PPC_RAW_STD(_R2, _R1, 24));
		PPC64_LOAD_PACA();
	}

	/* 32-bit: save tail call count in r4 */
	if (IS_ENABLED(CONFIG_PPC32) && nr_regs < 2)
		EMIT(PPC_RAW_STL(_R4, _R1, r4_off));

	bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, regs_off);

	/* Save our return address */
	EMIT(PPC_RAW_MFLR(_R3));
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
		EMIT(PPC_RAW_STL(_R3, _R1, alt_lr_off));
	else
		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));

	/*
	 * Save ip address of the traced function.
	 * We could recover this from LR, but we would then need to account for
	 * the OOL trampoline and the optional GEP area.
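	 *
	 * The instruction at LR + 4 is a branch: the slwi/srawi pair below
	 * sign-extends its 26-bit offset field so it can be added back to
	 * compute the branch target.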
	 */
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & BPF_TRAMP_F_IP_ARG) {
		EMIT(PPC_RAW_LWZ(_R4, _R3, 4));
		EMIT(PPC_RAW_SLWI(_R4, _R4, 6));
		EMIT(PPC_RAW_SRAWI(_R4, _R4, 6));
		EMIT(PPC_RAW_ADD(_R3, _R3, _R4));
		EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
	}

	if (flags & BPF_TRAMP_F_IP_ARG)
		EMIT(PPC_RAW_STL(_R3, _R1, ip_off));

	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
		/* Fake our LR for unwind */
		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));

	/* Save function arg count -- see bpf_get_func_arg_cnt() */
	EMIT(PPC_RAW_LI(_R3, nr_regs));
	EMIT(PPC_RAW_STL(_R3, _R1, nregs_off));

	/* Save nv regs */
	EMIT(PPC_RAW_STL(_R25, _R1, nvr_off));
	EMIT(PPC_RAW_STL(_R26, _R1, nvr_off + SZL));

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		PPC_LI_ADDR(_R3, (unsigned long)im);
		ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
						 (unsigned long)__bpf_tramp_enter);
		if (ret)
			return ret;
	}

	for (i = 0; i < fentry->nr_links; i++)
		if (invoke_bpf_prog(image, ro_image, ctx, fentry->links[i], regs_off, retval_off,
				    run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_links) {
		branches = kcalloc(fmod_ret->nr_links, sizeof(u32), GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(image, ro_image, ctx, fmod_ret, regs_off, retval_off,
				       run_ctx_off, branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* Call the traced function */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/*
		 * The address in LR save area points to the correct point in the original function
		 * with both PPC_FTRACE_OUT_OF_LINE as well as with traditional ftrace instruction
		 * sequence
		 */
		EMIT(PPC_RAW_LL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
		EMIT(PPC_RAW_MTCTR(_R3));

		/* Replicate tail_call_cnt before calling the original BPF prog */
		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
			bpf_trampoline_setup_tail_call_cnt(image, ctx, func_frame_offset, r4_off);

		/* Restore args */
		bpf_trampoline_restore_args_stack(image, ctx, func_frame_offset, nr_regs, regs_off);

		/* Restore TOC for 64-bit */
		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
			EMIT(PPC_RAW_LD(_R2, _R1, 24));
		EMIT(PPC_RAW_BCTRL());
		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
			PPC64_LOAD_PACA();

		/* Store return value for bpf prog to access */
		EMIT(PPC_RAW_STL(_R3, _R1, retval_off));

		/* Restore updated tail_call_cnt */
		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
			bpf_trampoline_restore_tail_call_cnt(image, ctx, func_frame_offset, r4_off);

		/* Reserve space to patch branch instruction to skip fexit progs */
		if (ro_image) /* image is NULL for dummy pass */
			im->ip_after_call = &((u32 *)ro_image)[ctx->idx];
		EMIT(PPC_RAW_NOP());
	}

	/* Update branches saved in invoke_bpf_mod_ret with address of do_fexit */
	for (i = 0; i < fmod_ret->nr_links && image; i++) {
		if (create_cond_branch(&branch_insn, &image[branches[i]],
				       (unsigned long)&image[ctx->idx], COND_NE << 16)) {
			ret = -EINVAL;
			goto cleanup;
		}

		image[branches[i]] = ppc_inst_val(branch_insn);
	}

	for (i = 0; i < fexit->nr_links; i++)
		if (invoke_bpf_prog(image, ro_image, ctx, fexit->links[i], regs_off, retval_off,
				    run_ctx_off, false)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		if (ro_image) /* image is NULL for dummy pass */
			im->ip_epilogue = &((u32 *)ro_image)[ctx->idx];
		PPC_LI_ADDR(_R3, im);
		ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
						 (unsigned long)__bpf_tramp_exit);
		if (ret)
			goto cleanup;
	}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off);

	/* Restore return value of func_addr or fentry prog */
	if (save_ret)
		EMIT(PPC_RAW_LL(_R3, _R1, retval_off));

	/* Restore nv regs */
	EMIT(PPC_RAW_LL(_R26, _R1, nvr_off + SZL));
	EMIT(PPC_RAW_LL(_R25, _R1, nvr_off));

	/* Epilogue */
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
		EMIT(PPC_RAW_LD(_R2, _R1, 24));
	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* Skip the traced function and return to parent */
		EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset));
		EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF));
		EMIT(PPC_RAW_MTLR(_R0));
		EMIT(PPC_RAW_BLR());
	} else {
		if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
			EMIT(PPC_RAW_LL(_R0, _R1, alt_lr_off));
			EMIT(PPC_RAW_MTLR(_R0));
			EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset));
			EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_BLR());
		} else {
			EMIT(PPC_RAW_LL(_R0, _R1, bpf_frame_size + PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTCTR(_R0));
			EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset));
			EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
			EMIT(PPC_RAW_BCTR());
		}
	}

	/* Make sure the trampoline generation logic doesn't overflow */
	if (image && WARN_ON_ONCE(&image[ctx->idx] > (u32 *)rw_image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = ctx->idx * 4 + BPF_INSN_SAFETY * 4;

cleanup:
	kfree(branches);
	return ret;
}

int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr)
{
	struct bpf_tramp_image im;
	int ret;

	ret = __arch_prepare_bpf_trampoline(&im, NULL, NULL, NULL, m, flags, tlinks, func_addr);
	return ret;
}

int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *func_addr)
{
	u32 size = image_end - image;
	void *rw_image, *tmp;
	int ret;

	/*
	 * rw_image doesn't need to be in module memory range, so we can
	 * use kvmalloc.
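	 *
	 * The trampoline is generated into rw_image and then copied into the
	 * read-only image below with bpf_arch_text_copy().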
	 */
	rw_image = kvmalloc(size, GFP_KERNEL);
	if (!rw_image)
		return -ENOMEM;

	ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
					    flags, tlinks, func_addr);
	if (ret < 0)
		goto out;

	if (bpf_jit_enable > 1)
		bpf_jit_dump(1, ret - BPF_INSN_SAFETY * 4, 1, rw_image);

	tmp = bpf_arch_text_copy(image, rw_image, size);
	if (IS_ERR(tmp))
		ret = PTR_ERR(tmp);

out:
	kvfree(rw_image);
	return ret;
}

static int bpf_modify_inst(void *ip, ppc_inst_t old_inst, ppc_inst_t new_inst)
{
	ppc_inst_t org_inst;

	if (copy_inst_from_kernel_nofault(&org_inst, ip)) {
		pr_err("0x%lx: fetching instruction failed\n", (unsigned long)ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(org_inst, old_inst)) {
		pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
		       (unsigned long)ip, ppc_inst_as_ulong(old_inst), ppc_inst_as_ulong(org_inst));
		return -EINVAL;
	}

	if (ppc_inst_equal(old_inst, new_inst))
		return 0;

	return patch_instruction(ip, new_inst);
}

static void do_isync(void *info __maybe_unused)
{
	isync();
}

/*
 * A 3-step process for bpf prog entry:
 * 1. At bpf prog entry, a single nop/b:
 * bpf_func:
 *	[nop|b]	ool_stub
 * 2. Out-of-line stub:
 * ool_stub:
 *	mflr	r0
 *	[b|bl]	<bpf_prog>/<long_branch_stub>
 *	mtlr	r0 // CONFIG_PPC_FTRACE_OUT_OF_LINE only
 *	b	bpf_func + 4
 * 3. Long branch stub:
 * long_branch_stub:
 *	.long	<branch_addr>/<dummy_tramp>
 *	mflr	r11
 *	bcl	20,31,$+4
 *	mflr	r12
 *	ld	r12, -16(r12)
 *	mtctr	r12
 *	mtlr	r11 // needed to retain ftrace ABI
 *	bctr
 *
 * dummy_tramp is used to reduce synchronization requirements.
 *
 * When attaching a bpf trampoline to a bpf prog, we do not need any
 * synchronization here since we always have a valid branch target regardless
 * of the order in which the above stores are seen. dummy_tramp ensures that
 * the long_branch stub goes to a valid destination on other cpus, even when
 * the branch to the long_branch stub is seen before the updated trampoline
 * address.
 *
 * However, when detaching a bpf trampoline from a bpf prog, or if changing
 * the bpf trampoline address, we need synchronization to ensure that other
 * cpus can no longer branch into the older trampoline so that it can be
 * safely freed. bpf_tramp_image_put() uses rcu_tasks to ensure all cpus
 * make forward progress, but we still need to ensure that other cpus
 * execute isync (or some CSI) so that they don't go back into the
 * trampoline again.
 */
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
		       void *old_addr, void *new_addr)
{
	unsigned long bpf_func, bpf_func_end, size, offset;
	ppc_inst_t old_inst, new_inst;
	int ret = 0, branch_flags;
	char name[KSYM_NAME_LEN];

	if (IS_ENABLED(CONFIG_PPC32))
		return -EOPNOTSUPP;

	bpf_func = (unsigned long)ip;
	branch_flags = poke_type == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;

	/* We currently only support poking bpf programs */
	if (!__bpf_address_lookup(bpf_func, &size, &offset, name)) {
		pr_err("%s (0x%lx): kernel/modules are not supported\n", __func__, bpf_func);
		return -EOPNOTSUPP;
	}

	/*
	 * If we are not poking at bpf prog entry, then we are simply patching in/out
	 * an unconditional branch instruction at im->ip_after_call
	 */
	if (offset) {
		if (poke_type != BPF_MOD_JUMP) {
			pr_err("%s (0x%lx): calls are not supported in bpf prog body\n", __func__,
			       bpf_func);
			return -EOPNOTSUPP;
		}
		old_inst = ppc_inst(PPC_RAW_NOP());
		if (old_addr)
			if (create_branch(&old_inst, ip, (unsigned long)old_addr, 0))
				return -ERANGE;
		new_inst = ppc_inst(PPC_RAW_NOP());
		if (new_addr)
			if (create_branch(&new_inst, ip, (unsigned long)new_addr, 0))
				return -ERANGE;
		mutex_lock(&text_mutex);
		ret = bpf_modify_inst(ip, old_inst, new_inst);
		mutex_unlock(&text_mutex);

		/* Make sure all cpus see the new instruction */
		smp_call_function(do_isync, NULL, 1);
		return ret;
	}

	bpf_func_end = bpf_func + size;

	/* Address of the jmp/call instruction in the out-of-line stub */
	ip = (void *)(bpf_func_end - bpf_jit_ool_stub + 4);

	if (!is_offset_in_branch_range((long)ip - 4 - bpf_func)) {
		pr_err("%s (0x%lx): bpf prog too large, ool stub out of branch range\n", __func__,
		       bpf_func);
		return -ERANGE;
	}

	old_inst = ppc_inst(PPC_RAW_NOP());
	if (old_addr) {
		if (is_offset_in_branch_range(ip - old_addr))
			create_branch(&old_inst, ip, (unsigned long)old_addr, branch_flags);
		else
			create_branch(&old_inst, ip, bpf_func_end - bpf_jit_long_branch_stub,
				      branch_flags);
	}
	new_inst = ppc_inst(PPC_RAW_NOP());
	if (new_addr) {
		if (is_offset_in_branch_range(ip - new_addr))
			create_branch(&new_inst, ip, (unsigned long)new_addr, branch_flags);
		else
			create_branch(&new_inst, ip, bpf_func_end - bpf_jit_long_branch_stub,
				      branch_flags);
	}

	mutex_lock(&text_mutex);

	/*
	 * 1. Update the address in the long branch stub:
	 * If new_addr is out of range, we will have to use the long branch stub, so patch new_addr
	 * here. Otherwise, revert to dummy_tramp, but only if we had patched old_addr here.
	 */
	if ((new_addr && !is_offset_in_branch_range(new_addr - ip)) ||
	    (old_addr && !is_offset_in_branch_range(old_addr - ip)))
		ret = patch_ulong((void *)(bpf_func_end - bpf_jit_long_branch_stub - SZL),
				  (new_addr && !is_offset_in_branch_range(new_addr - ip)) ?
				  (unsigned long)new_addr : (unsigned long)dummy_tramp);
	if (ret)
		goto out;

	/* 2. Update the branch/call in the out-of-line stub */
	ret = bpf_modify_inst(ip, old_inst, new_inst);
	if (ret)
		goto out;

	/* 3. Update instruction at bpf prog entry */
	ip = (void *)bpf_func;
	if (!old_addr || !new_addr) {
		if (!old_addr) {
			old_inst = ppc_inst(PPC_RAW_NOP());
			create_branch(&new_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0);
		} else {
			new_inst = ppc_inst(PPC_RAW_NOP());
			create_branch(&old_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0);
		}
		ret = bpf_modify_inst(ip, old_inst, new_inst);
	}

out:
	mutex_unlock(&text_mutex);

	/*
	 * Sync only if we are not merely attaching a trampoline to a bpf prog,
	 * so that the older trampoline can be freed safely.
	 */
	if (old_addr)
		smp_call_function(do_isync, NULL, 1);

	return ret;
}