1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * BPF JIT compiler for ARM64 4 * 5 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com> 6 */ 7 8 #define pr_fmt(fmt) "bpf_jit: " fmt 9 10 #include <linux/bitfield.h> 11 #include <linux/bpf.h> 12 #include <linux/filter.h> 13 #include <linux/memory.h> 14 #include <linux/printk.h> 15 #include <linux/slab.h> 16 17 #include <asm/asm-extable.h> 18 #include <asm/byteorder.h> 19 #include <asm/cacheflush.h> 20 #include <asm/debug-monitors.h> 21 #include <asm/insn.h> 22 #include <asm/patching.h> 23 #include <asm/set_memory.h> 24 25 #include "bpf_jit.h" 26 27 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) 28 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) 29 #define TCALL_CNT (MAX_BPF_JIT_REG + 2) 30 #define TMP_REG_3 (MAX_BPF_JIT_REG + 3) 31 #define FP_BOTTOM (MAX_BPF_JIT_REG + 4) 32 #define ARENA_VM_START (MAX_BPF_JIT_REG + 5) 33 34 #define check_imm(bits, imm) do { \ 35 if ((((imm) > 0) && ((imm) >> (bits))) || \ 36 (((imm) < 0) && (~(imm) >> (bits)))) { \ 37 pr_info("[%2d] imm=%d(0x%x) out of range\n", \ 38 i, imm, imm); \ 39 return -EINVAL; \ 40 } \ 41 } while (0) 42 #define check_imm19(imm) check_imm(19, imm) 43 #define check_imm26(imm) check_imm(26, imm) 44 45 /* Map BPF registers to A64 registers */ 46 static const int bpf2a64[] = { 47 /* return value from in-kernel function, and exit value from eBPF */ 48 [BPF_REG_0] = A64_R(7), 49 /* arguments from eBPF program to in-kernel function */ 50 [BPF_REG_1] = A64_R(0), 51 [BPF_REG_2] = A64_R(1), 52 [BPF_REG_3] = A64_R(2), 53 [BPF_REG_4] = A64_R(3), 54 [BPF_REG_5] = A64_R(4), 55 /* callee saved registers that in-kernel function will preserve */ 56 [BPF_REG_6] = A64_R(19), 57 [BPF_REG_7] = A64_R(20), 58 [BPF_REG_8] = A64_R(21), 59 [BPF_REG_9] = A64_R(22), 60 /* read-only frame pointer to access stack */ 61 [BPF_REG_FP] = A64_R(25), 62 /* temporary registers for BPF JIT */ 63 [TMP_REG_1] = A64_R(10), 64 [TMP_REG_2] = A64_R(11), 65 [TMP_REG_3] = A64_R(12), 66 /* tail_call_cnt */ 67 [TCALL_CNT] = A64_R(26), 68 /* temporary register for blinding constants */ 69 [BPF_REG_AX] = A64_R(9), 70 [FP_BOTTOM] = A64_R(27), 71 /* callee saved register for kern_vm_start address */ 72 [ARENA_VM_START] = A64_R(28), 73 }; 74 75 struct jit_ctx { 76 const struct bpf_prog *prog; 77 int idx; 78 int epilogue_offset; 79 int *offset; 80 int exentry_idx; 81 __le32 *image; 82 __le32 *ro_image; 83 u32 stack_size; 84 int fpb_offset; 85 u64 user_vm_start; 86 }; 87 88 struct bpf_plt { 89 u32 insn_ldr; /* load target */ 90 u32 insn_br; /* branch to target */ 91 u64 target; /* target value */ 92 }; 93 94 #define PLT_TARGET_SIZE sizeof_field(struct bpf_plt, target) 95 #define PLT_TARGET_OFFSET offsetof(struct bpf_plt, target) 96 97 static inline void emit(const u32 insn, struct jit_ctx *ctx) 98 { 99 if (ctx->image != NULL) 100 ctx->image[ctx->idx] = cpu_to_le32(insn); 101 102 ctx->idx++; 103 } 104 105 static inline void emit_a64_mov_i(const int is64, const int reg, 106 const s32 val, struct jit_ctx *ctx) 107 { 108 u16 hi = val >> 16; 109 u16 lo = val & 0xffff; 110 111 if (hi & 0x8000) { 112 if (hi == 0xffff) { 113 emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx); 114 } else { 115 emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx); 116 if (lo != 0xffff) 117 emit(A64_MOVK(is64, reg, lo, 0), ctx); 118 } 119 } else { 120 emit(A64_MOVZ(is64, reg, lo, 0), ctx); 121 if (hi) 122 emit(A64_MOVK(is64, reg, hi, 16), ctx); 123 } 124 } 125 126 static int i64_i16_blocks(const u64 val, bool inverse) 127 { 128 return (((val >> 0) & 0xffff) != (inverse ? 
0xffff : 0x0000)) + 129 (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) + 130 (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) + 131 (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000)); 132 } 133 134 static inline void emit_a64_mov_i64(const int reg, const u64 val, 135 struct jit_ctx *ctx) 136 { 137 u64 nrm_tmp = val, rev_tmp = ~val; 138 bool inverse; 139 int shift; 140 141 if (!(nrm_tmp >> 32)) 142 return emit_a64_mov_i(0, reg, (u32)val, ctx); 143 144 inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false); 145 shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) : 146 (fls64(nrm_tmp) - 1)), 16), 0); 147 if (inverse) 148 emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx); 149 else 150 emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx); 151 shift -= 16; 152 while (shift >= 0) { 153 if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000)) 154 emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx); 155 shift -= 16; 156 } 157 } 158 159 static inline void emit_bti(u32 insn, struct jit_ctx *ctx) 160 { 161 if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) 162 emit(insn, ctx); 163 } 164 165 /* 166 * Kernel addresses in the vmalloc space use at most 48 bits, and the 167 * remaining bits are guaranteed to be 0x1. So we can compose the address 168 * with a fixed length movn/movk/movk sequence. 169 */ 170 static inline void emit_addr_mov_i64(const int reg, const u64 val, 171 struct jit_ctx *ctx) 172 { 173 u64 tmp = val; 174 int shift = 0; 175 176 emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx); 177 while (shift < 32) { 178 tmp >>= 16; 179 shift += 16; 180 emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx); 181 } 182 } 183 184 static inline void emit_call(u64 target, struct jit_ctx *ctx) 185 { 186 u8 tmp = bpf2a64[TMP_REG_1]; 187 188 emit_addr_mov_i64(tmp, target, ctx); 189 emit(A64_BLR(tmp), ctx); 190 } 191 192 static inline int bpf2a64_offset(int bpf_insn, int off, 193 const struct jit_ctx *ctx) 194 { 195 /* BPF JMP offset is relative to the next instruction */ 196 bpf_insn++; 197 /* 198 * Whereas arm64 branch instructions encode the offset 199 * from the branch itself, so we must subtract 1 from the 200 * instruction offset. 201 */ 202 return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1); 203 } 204 205 static void jit_fill_hole(void *area, unsigned int size) 206 { 207 __le32 *ptr; 208 /* We are guaranteed to have aligned memory. */ 209 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) 210 *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT); 211 } 212 213 int bpf_arch_text_invalidate(void *dst, size_t len) 214 { 215 if (!aarch64_insn_set(dst, AARCH64_BREAK_FAULT, len)) 216 return -EINVAL; 217 218 return 0; 219 } 220 221 static inline int epilogue_offset(const struct jit_ctx *ctx) 222 { 223 int to = ctx->epilogue_offset; 224 int from = ctx->idx; 225 226 return to - from; 227 } 228 229 static bool is_addsub_imm(u32 imm) 230 { 231 /* Either imm12 or shifted imm12. */ 232 return !(imm & ~0xfff) || !(imm & ~0xfff000); 233 } 234 235 /* 236 * There are 3 types of AArch64 LDR/STR (immediate) instruction: 237 * Post-index, Pre-index, Unsigned offset. 238 * 239 * For BPF ldr/str, the "unsigned offset" type is sufficient. 
240 * 241 * "Unsigned offset" type LDR(immediate) format: 242 * 243 * 3 2 1 0 244 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 245 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 246 * |x x|1 1 1 0 0 1 0 1| imm12 | Rn | Rt | 247 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 248 * scale 249 * 250 * "Unsigned offset" type STR(immediate) format: 251 * 3 2 1 0 252 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 253 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 254 * |x x|1 1 1 0 0 1 0 0| imm12 | Rn | Rt | 255 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 256 * scale 257 * 258 * The offset is calculated from imm12 and scale in the following way: 259 * 260 * offset = (u64)imm12 << scale 261 */ 262 static bool is_lsi_offset(int offset, int scale) 263 { 264 if (offset < 0) 265 return false; 266 267 if (offset > (0xFFF << scale)) 268 return false; 269 270 if (offset & ((1 << scale) - 1)) 271 return false; 272 273 return true; 274 } 275 276 /* generated prologue: 277 * bti c // if CONFIG_ARM64_BTI_KERNEL 278 * mov x9, lr 279 * nop // POKE_OFFSET 280 * paciasp // if CONFIG_ARM64_PTR_AUTH_KERNEL 281 * stp x29, lr, [sp, #-16]! 282 * mov x29, sp 283 * stp x19, x20, [sp, #-16]! 284 * stp x21, x22, [sp, #-16]! 285 * stp x25, x26, [sp, #-16]! 286 * stp x27, x28, [sp, #-16]! 287 * mov x25, sp 288 * mov tcc, #0 289 * // PROLOGUE_OFFSET 290 */ 291 292 #define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0) 293 #define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0) 294 295 /* Offset of nop instruction in bpf prog entry to be poked */ 296 #define POKE_OFFSET (BTI_INSNS + 1) 297 298 /* Tail call offset to jump into */ 299 #define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8) 300 301 static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, 302 bool is_exception_cb, u64 arena_vm_start) 303 { 304 const struct bpf_prog *prog = ctx->prog; 305 const bool is_main_prog = !bpf_is_subprog(prog); 306 const u8 r6 = bpf2a64[BPF_REG_6]; 307 const u8 r7 = bpf2a64[BPF_REG_7]; 308 const u8 r8 = bpf2a64[BPF_REG_8]; 309 const u8 r9 = bpf2a64[BPF_REG_9]; 310 const u8 fp = bpf2a64[BPF_REG_FP]; 311 const u8 tcc = bpf2a64[TCALL_CNT]; 312 const u8 fpb = bpf2a64[FP_BOTTOM]; 313 const u8 arena_vm_base = bpf2a64[ARENA_VM_START]; 314 const int idx0 = ctx->idx; 315 int cur_offset; 316 317 /* 318 * BPF prog stack layout 319 * 320 * high 321 * original A64_SP => 0:+-----+ BPF prologue 322 * |FP/LR| 323 * current A64_FP => -16:+-----+ 324 * | ... | callee saved registers 325 * BPF fp register => -64:+-----+ <= (BPF_FP) 326 * | | 327 * | ... | BPF prog stack 328 * | | 329 * +-----+ <= (BPF_FP - prog->aux->stack_depth) 330 * |RSVD | padding 331 * current A64_SP => +-----+ <= (BPF_FP - ctx->stack_size) 332 * | | 333 * | ... | Function call stack 334 * | | 335 * +-----+ 336 * low 337 * 338 */ 339 340 /* bpf function may be invoked by 3 instruction types: 341 * 1. bl, attached via freplace to bpf prog via short jump 342 * 2. br, attached via freplace to bpf prog via long jump 343 * 3. blr, working as a function pointer, used by emit_call. 344 * So BTI_JC should used here to support both br and blr. 
345 */ 346 emit_bti(A64_BTI_JC, ctx); 347 348 emit(A64_MOV(1, A64_R(9), A64_LR), ctx); 349 emit(A64_NOP, ctx); 350 351 if (!is_exception_cb) { 352 /* Sign lr */ 353 if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) 354 emit(A64_PACIASP, ctx); 355 /* Save FP and LR registers to stay align with ARM64 AAPCS */ 356 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); 357 emit(A64_MOV(1, A64_FP, A64_SP), ctx); 358 359 /* Save callee-saved registers */ 360 emit(A64_PUSH(r6, r7, A64_SP), ctx); 361 emit(A64_PUSH(r8, r9, A64_SP), ctx); 362 emit(A64_PUSH(fp, tcc, A64_SP), ctx); 363 emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx); 364 } else { 365 /* 366 * Exception callback receives FP of Main Program as third 367 * parameter 368 */ 369 emit(A64_MOV(1, A64_FP, A64_R(2)), ctx); 370 /* 371 * Main Program already pushed the frame record and the 372 * callee-saved registers. The exception callback will not push 373 * anything and re-use the main program's stack. 374 * 375 * 10 registers are on the stack 376 */ 377 emit(A64_SUB_I(1, A64_SP, A64_FP, 80), ctx); 378 } 379 380 /* Set up BPF prog stack base register */ 381 emit(A64_MOV(1, fp, A64_SP), ctx); 382 383 if (!ebpf_from_cbpf && is_main_prog) { 384 /* Initialize tail_call_cnt */ 385 emit(A64_MOVZ(1, tcc, 0, 0), ctx); 386 387 cur_offset = ctx->idx - idx0; 388 if (cur_offset != PROLOGUE_OFFSET) { 389 pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", 390 cur_offset, PROLOGUE_OFFSET); 391 return -1; 392 } 393 394 /* BTI landing pad for the tail call, done with a BR */ 395 emit_bti(A64_BTI_J, ctx); 396 } 397 398 /* 399 * Program acting as exception boundary should save all ARM64 400 * Callee-saved registers as the exception callback needs to recover 401 * all ARM64 Callee-saved registers in its epilogue. 402 */ 403 if (prog->aux->exception_boundary) { 404 /* 405 * As we are pushing two more registers, BPF_FP should be moved 406 * 16 bytes 407 */ 408 emit(A64_SUB_I(1, fp, fp, 16), ctx); 409 emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx); 410 } 411 412 emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx); 413 414 /* Stack must be multiples of 16B */ 415 ctx->stack_size = round_up(prog->aux->stack_depth, 16); 416 417 /* Set up function call stack */ 418 emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); 419 420 if (arena_vm_start) 421 emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx); 422 423 return 0; 424 } 425 426 static int out_offset = -1; /* initialized on the first pass of build_body() */ 427 static int emit_bpf_tail_call(struct jit_ctx *ctx) 428 { 429 /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */ 430 const u8 r2 = bpf2a64[BPF_REG_2]; 431 const u8 r3 = bpf2a64[BPF_REG_3]; 432 433 const u8 tmp = bpf2a64[TMP_REG_1]; 434 const u8 prg = bpf2a64[TMP_REG_2]; 435 const u8 tcc = bpf2a64[TCALL_CNT]; 436 const int idx0 = ctx->idx; 437 #define cur_offset (ctx->idx - idx0) 438 #define jmp_offset (out_offset - (cur_offset)) 439 size_t off; 440 441 /* if (index >= array->map.max_entries) 442 * goto out; 443 */ 444 off = offsetof(struct bpf_array, map.max_entries); 445 emit_a64_mov_i64(tmp, off, ctx); 446 emit(A64_LDR32(tmp, r2, tmp), ctx); 447 emit(A64_MOV(0, r3, r3), ctx); 448 emit(A64_CMP(0, r3, tmp), ctx); 449 emit(A64_B_(A64_COND_CS, jmp_offset), ctx); 450 451 /* 452 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) 453 * goto out; 454 * tail_call_cnt++; 455 */ 456 emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); 457 emit(A64_CMP(1, tcc, tmp), ctx); 458 emit(A64_B_(A64_COND_CS, jmp_offset), ctx); 459 emit(A64_ADD_I(1, tcc, tcc, 1), ctx); 460 461 /* 
prog = array->ptrs[index]; 462 * if (prog == NULL) 463 * goto out; 464 */ 465 off = offsetof(struct bpf_array, ptrs); 466 emit_a64_mov_i64(tmp, off, ctx); 467 emit(A64_ADD(1, tmp, r2, tmp), ctx); 468 emit(A64_LSL(1, prg, r3, 3), ctx); 469 emit(A64_LDR64(prg, tmp, prg), ctx); 470 emit(A64_CBZ(1, prg, jmp_offset), ctx); 471 472 /* goto *(prog->bpf_func + prologue_offset); */ 473 off = offsetof(struct bpf_prog, bpf_func); 474 emit_a64_mov_i64(tmp, off, ctx); 475 emit(A64_LDR64(tmp, prg, tmp), ctx); 476 emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); 477 emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); 478 emit(A64_BR(tmp), ctx); 479 480 /* out: */ 481 if (out_offset == -1) 482 out_offset = cur_offset; 483 if (cur_offset != out_offset) { 484 pr_err_once("tail_call out_offset = %d, expected %d!\n", 485 cur_offset, out_offset); 486 return -1; 487 } 488 return 0; 489 #undef cur_offset 490 #undef jmp_offset 491 } 492 493 #ifdef CONFIG_ARM64_LSE_ATOMICS 494 static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx) 495 { 496 const u8 code = insn->code; 497 const u8 dst = bpf2a64[insn->dst_reg]; 498 const u8 src = bpf2a64[insn->src_reg]; 499 const u8 tmp = bpf2a64[TMP_REG_1]; 500 const u8 tmp2 = bpf2a64[TMP_REG_2]; 501 const bool isdw = BPF_SIZE(code) == BPF_DW; 502 const s16 off = insn->off; 503 u8 reg; 504 505 if (!off) { 506 reg = dst; 507 } else { 508 emit_a64_mov_i(1, tmp, off, ctx); 509 emit(A64_ADD(1, tmp, tmp, dst), ctx); 510 reg = tmp; 511 } 512 513 switch (insn->imm) { 514 /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */ 515 case BPF_ADD: 516 emit(A64_STADD(isdw, reg, src), ctx); 517 break; 518 case BPF_AND: 519 emit(A64_MVN(isdw, tmp2, src), ctx); 520 emit(A64_STCLR(isdw, reg, tmp2), ctx); 521 break; 522 case BPF_OR: 523 emit(A64_STSET(isdw, reg, src), ctx); 524 break; 525 case BPF_XOR: 526 emit(A64_STEOR(isdw, reg, src), ctx); 527 break; 528 /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */ 529 case BPF_ADD | BPF_FETCH: 530 emit(A64_LDADDAL(isdw, src, reg, src), ctx); 531 break; 532 case BPF_AND | BPF_FETCH: 533 emit(A64_MVN(isdw, tmp2, src), ctx); 534 emit(A64_LDCLRAL(isdw, src, reg, tmp2), ctx); 535 break; 536 case BPF_OR | BPF_FETCH: 537 emit(A64_LDSETAL(isdw, src, reg, src), ctx); 538 break; 539 case BPF_XOR | BPF_FETCH: 540 emit(A64_LDEORAL(isdw, src, reg, src), ctx); 541 break; 542 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ 543 case BPF_XCHG: 544 emit(A64_SWPAL(isdw, src, reg, src), ctx); 545 break; 546 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ 547 case BPF_CMPXCHG: 548 emit(A64_CASAL(isdw, src, reg, bpf2a64[BPF_REG_0]), ctx); 549 break; 550 default: 551 pr_err_once("unknown atomic op code %02x\n", insn->imm); 552 return -EINVAL; 553 } 554 555 return 0; 556 } 557 #else 558 static inline int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx) 559 { 560 return -EINVAL; 561 } 562 #endif 563 564 static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx) 565 { 566 const u8 code = insn->code; 567 const u8 dst = bpf2a64[insn->dst_reg]; 568 const u8 src = bpf2a64[insn->src_reg]; 569 const u8 tmp = bpf2a64[TMP_REG_1]; 570 const u8 tmp2 = bpf2a64[TMP_REG_2]; 571 const u8 tmp3 = bpf2a64[TMP_REG_3]; 572 const int i = insn - ctx->prog->insnsi; 573 const s32 imm = insn->imm; 574 const s16 off = insn->off; 575 const bool isdw = BPF_SIZE(code) == BPF_DW; 576 u8 reg; 577 s32 jmp_offset; 578 579 if (!off) { 580 reg = dst; 581 } else { 582 emit_a64_mov_i(1, tmp, off, ctx); 583 
emit(A64_ADD(1, tmp, tmp, dst), ctx); 584 reg = tmp; 585 } 586 587 if (imm == BPF_ADD || imm == BPF_AND || 588 imm == BPF_OR || imm == BPF_XOR) { 589 /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */ 590 emit(A64_LDXR(isdw, tmp2, reg), ctx); 591 if (imm == BPF_ADD) 592 emit(A64_ADD(isdw, tmp2, tmp2, src), ctx); 593 else if (imm == BPF_AND) 594 emit(A64_AND(isdw, tmp2, tmp2, src), ctx); 595 else if (imm == BPF_OR) 596 emit(A64_ORR(isdw, tmp2, tmp2, src), ctx); 597 else 598 emit(A64_EOR(isdw, tmp2, tmp2, src), ctx); 599 emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx); 600 jmp_offset = -3; 601 check_imm19(jmp_offset); 602 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); 603 } else if (imm == (BPF_ADD | BPF_FETCH) || 604 imm == (BPF_AND | BPF_FETCH) || 605 imm == (BPF_OR | BPF_FETCH) || 606 imm == (BPF_XOR | BPF_FETCH)) { 607 /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */ 608 const u8 ax = bpf2a64[BPF_REG_AX]; 609 610 emit(A64_MOV(isdw, ax, src), ctx); 611 emit(A64_LDXR(isdw, src, reg), ctx); 612 if (imm == (BPF_ADD | BPF_FETCH)) 613 emit(A64_ADD(isdw, tmp2, src, ax), ctx); 614 else if (imm == (BPF_AND | BPF_FETCH)) 615 emit(A64_AND(isdw, tmp2, src, ax), ctx); 616 else if (imm == (BPF_OR | BPF_FETCH)) 617 emit(A64_ORR(isdw, tmp2, src, ax), ctx); 618 else 619 emit(A64_EOR(isdw, tmp2, src, ax), ctx); 620 emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx); 621 jmp_offset = -3; 622 check_imm19(jmp_offset); 623 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); 624 emit(A64_DMB_ISH, ctx); 625 } else if (imm == BPF_XCHG) { 626 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ 627 emit(A64_MOV(isdw, tmp2, src), ctx); 628 emit(A64_LDXR(isdw, src, reg), ctx); 629 emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx); 630 jmp_offset = -2; 631 check_imm19(jmp_offset); 632 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); 633 emit(A64_DMB_ISH, ctx); 634 } else if (imm == BPF_CMPXCHG) { 635 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ 636 const u8 r0 = bpf2a64[BPF_REG_0]; 637 638 emit(A64_MOV(isdw, tmp2, r0), ctx); 639 emit(A64_LDXR(isdw, r0, reg), ctx); 640 emit(A64_EOR(isdw, tmp3, r0, tmp2), ctx); 641 jmp_offset = 4; 642 check_imm19(jmp_offset); 643 emit(A64_CBNZ(isdw, tmp3, jmp_offset), ctx); 644 emit(A64_STLXR(isdw, src, reg, tmp3), ctx); 645 jmp_offset = -4; 646 check_imm19(jmp_offset); 647 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); 648 emit(A64_DMB_ISH, ctx); 649 } else { 650 pr_err_once("unknown atomic op code %02x\n", imm); 651 return -EINVAL; 652 } 653 654 return 0; 655 } 656 657 void dummy_tramp(void); 658 659 asm ( 660 " .pushsection .text, \"ax\", @progbits\n" 661 " .global dummy_tramp\n" 662 " .type dummy_tramp, %function\n" 663 "dummy_tramp:" 664 #if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) 665 " bti j\n" /* dummy_tramp is called via "br x10" */ 666 #endif 667 " mov x10, x30\n" 668 " mov x30, x9\n" 669 " ret x10\n" 670 " .size dummy_tramp, .-dummy_tramp\n" 671 " .popsection\n" 672 ); 673 674 /* build a plt initialized like this: 675 * 676 * plt: 677 * ldr tmp, target 678 * br tmp 679 * target: 680 * .quad dummy_tramp 681 * 682 * when a long jump trampoline is attached, target is filled with the 683 * trampoline address, and when the trampoline is removed, target is 684 * restored to dummy_tramp address. 
685 */ 686 static void build_plt(struct jit_ctx *ctx) 687 { 688 const u8 tmp = bpf2a64[TMP_REG_1]; 689 struct bpf_plt *plt = NULL; 690 691 /* make sure target is 64-bit aligned */ 692 if ((ctx->idx + PLT_TARGET_OFFSET / AARCH64_INSN_SIZE) % 2) 693 emit(A64_NOP, ctx); 694 695 plt = (struct bpf_plt *)(ctx->image + ctx->idx); 696 /* plt is called via bl, no BTI needed here */ 697 emit(A64_LDR64LIT(tmp, 2 * AARCH64_INSN_SIZE), ctx); 698 emit(A64_BR(tmp), ctx); 699 700 if (ctx->image) 701 plt->target = (u64)&dummy_tramp; 702 } 703 704 static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb) 705 { 706 const u8 r0 = bpf2a64[BPF_REG_0]; 707 const u8 r6 = bpf2a64[BPF_REG_6]; 708 const u8 r7 = bpf2a64[BPF_REG_7]; 709 const u8 r8 = bpf2a64[BPF_REG_8]; 710 const u8 r9 = bpf2a64[BPF_REG_9]; 711 const u8 fp = bpf2a64[BPF_REG_FP]; 712 const u8 fpb = bpf2a64[FP_BOTTOM]; 713 714 /* We're done with BPF stack */ 715 emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); 716 717 /* 718 * Program acting as exception boundary pushes R23 and R24 in addition 719 * to BPF callee-saved registers. Exception callback uses the boundary 720 * program's stack frame, so recover these extra registers in the above 721 * two cases. 722 */ 723 if (ctx->prog->aux->exception_boundary || is_exception_cb) 724 emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx); 725 726 /* Restore x27 and x28 */ 727 emit(A64_POP(fpb, A64_R(28), A64_SP), ctx); 728 /* Restore fs (x25) and x26 */ 729 emit(A64_POP(fp, A64_R(26), A64_SP), ctx); 730 731 /* Restore callee-saved register */ 732 emit(A64_POP(r8, r9, A64_SP), ctx); 733 emit(A64_POP(r6, r7, A64_SP), ctx); 734 735 /* Restore FP/LR registers */ 736 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); 737 738 /* Set return value */ 739 emit(A64_MOV(1, A64_R(0), r0), ctx); 740 741 /* Authenticate lr */ 742 if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) 743 emit(A64_AUTIASP, ctx); 744 745 emit(A64_RET(A64_LR), ctx); 746 } 747 748 #define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0) 749 #define BPF_FIXUP_REG_MASK GENMASK(31, 27) 750 #define DONT_CLEAR 5 /* Unused ARM64 register from BPF's POV */ 751 752 bool ex_handler_bpf(const struct exception_table_entry *ex, 753 struct pt_regs *regs) 754 { 755 off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup); 756 int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup); 757 758 if (dst_reg != DONT_CLEAR) 759 regs->regs[dst_reg] = 0; 760 regs->pc = (unsigned long)&ex->fixup - offset; 761 return true; 762 } 763 764 /* For accesses to BTF pointers, add an entry to the exception table */ 765 static int add_exception_handler(const struct bpf_insn *insn, 766 struct jit_ctx *ctx, 767 int dst_reg) 768 { 769 off_t ins_offset; 770 off_t fixup_offset; 771 unsigned long pc; 772 struct exception_table_entry *ex; 773 774 if (!ctx->image) 775 /* First pass */ 776 return 0; 777 778 if (BPF_MODE(insn->code) != BPF_PROBE_MEM && 779 BPF_MODE(insn->code) != BPF_PROBE_MEMSX && 780 BPF_MODE(insn->code) != BPF_PROBE_MEM32) 781 return 0; 782 783 if (!ctx->prog->aux->extable || 784 WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries)) 785 return -EINVAL; 786 787 ex = &ctx->prog->aux->extable[ctx->exentry_idx]; 788 pc = (unsigned long)&ctx->ro_image[ctx->idx - 1]; 789 790 /* 791 * This is the relative offset of the instruction that may fault from 792 * the exception table itself. This will be written to the exception 793 * table and if this instruction faults, the destination register will 794 * be set to '0' and the execution will jump to the next instruction. 
795 */ 796 ins_offset = pc - (long)&ex->insn; 797 if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN)) 798 return -ERANGE; 799 800 /* 801 * Since the extable follows the program, the fixup offset is always 802 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value 803 * to keep things simple, and put the destination register in the upper 804 * bits. We don't need to worry about buildtime or runtime sort 805 * modifying the upper bits because the table is already sorted, and 806 * isn't part of the main exception table. 807 * 808 * The fixup_offset is set to the next instruction from the instruction 809 * that may fault. The execution will jump to this after handling the 810 * fault. 811 */ 812 fixup_offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE); 813 if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset)) 814 return -ERANGE; 815 816 /* 817 * The offsets above have been calculated using the RO buffer but we 818 * need to use the R/W buffer for writes. 819 * switch ex to rw buffer for writing. 820 */ 821 ex = (void *)ctx->image + ((void *)ex - (void *)ctx->ro_image); 822 823 ex->insn = ins_offset; 824 825 if (BPF_CLASS(insn->code) != BPF_LDX) 826 dst_reg = DONT_CLEAR; 827 828 ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) | 829 FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg); 830 831 ex->type = EX_TYPE_BPF; 832 833 ctx->exentry_idx++; 834 return 0; 835 } 836 837 /* JITs an eBPF instruction. 838 * Returns: 839 * 0 - successfully JITed an 8-byte eBPF instruction. 840 * >0 - successfully JITed a 16-byte eBPF instruction. 841 * <0 - failed to JIT. 842 */ 843 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, 844 bool extra_pass) 845 { 846 const u8 code = insn->code; 847 u8 dst = bpf2a64[insn->dst_reg]; 848 u8 src = bpf2a64[insn->src_reg]; 849 const u8 tmp = bpf2a64[TMP_REG_1]; 850 const u8 tmp2 = bpf2a64[TMP_REG_2]; 851 const u8 fp = bpf2a64[BPF_REG_FP]; 852 const u8 fpb = bpf2a64[FP_BOTTOM]; 853 const u8 arena_vm_base = bpf2a64[ARENA_VM_START]; 854 const s16 off = insn->off; 855 const s32 imm = insn->imm; 856 const int i = insn - ctx->prog->insnsi; 857 const bool is64 = BPF_CLASS(code) == BPF_ALU64 || 858 BPF_CLASS(code) == BPF_JMP; 859 u8 jmp_cond; 860 s32 jmp_offset; 861 u32 a64_insn; 862 u8 src_adj; 863 u8 dst_adj; 864 int off_adj; 865 int ret; 866 bool sign_extend; 867 868 switch (code) { 869 /* dst = src */ 870 case BPF_ALU | BPF_MOV | BPF_X: 871 case BPF_ALU64 | BPF_MOV | BPF_X: 872 if (insn_is_cast_user(insn)) { 873 emit(A64_MOV(0, tmp, src), ctx); // 32-bit mov clears the upper 32 bits 874 emit_a64_mov_i(0, dst, ctx->user_vm_start >> 32, ctx); 875 emit(A64_LSL(1, dst, dst, 32), ctx); 876 emit(A64_CBZ(1, tmp, 2), ctx); 877 emit(A64_ORR(1, tmp, dst, tmp), ctx); 878 emit(A64_MOV(1, dst, tmp), ctx); 879 break; 880 } 881 switch (insn->off) { 882 case 0: 883 emit(A64_MOV(is64, dst, src), ctx); 884 break; 885 case 8: 886 emit(A64_SXTB(is64, dst, src), ctx); 887 break; 888 case 16: 889 emit(A64_SXTH(is64, dst, src), ctx); 890 break; 891 case 32: 892 emit(A64_SXTW(is64, dst, src), ctx); 893 break; 894 } 895 break; 896 /* dst = dst OP src */ 897 case BPF_ALU | BPF_ADD | BPF_X: 898 case BPF_ALU64 | BPF_ADD | BPF_X: 899 emit(A64_ADD(is64, dst, dst, src), ctx); 900 break; 901 case BPF_ALU | BPF_SUB | BPF_X: 902 case BPF_ALU64 | BPF_SUB | BPF_X: 903 emit(A64_SUB(is64, dst, dst, src), ctx); 904 break; 905 case BPF_ALU | BPF_AND | BPF_X: 906 case BPF_ALU64 | BPF_AND | BPF_X: 907 emit(A64_AND(is64, dst, dst, src), ctx); 908 break; 909 case BPF_ALU | 
BPF_OR | BPF_X: 910 case BPF_ALU64 | BPF_OR | BPF_X: 911 emit(A64_ORR(is64, dst, dst, src), ctx); 912 break; 913 case BPF_ALU | BPF_XOR | BPF_X: 914 case BPF_ALU64 | BPF_XOR | BPF_X: 915 emit(A64_EOR(is64, dst, dst, src), ctx); 916 break; 917 case BPF_ALU | BPF_MUL | BPF_X: 918 case BPF_ALU64 | BPF_MUL | BPF_X: 919 emit(A64_MUL(is64, dst, dst, src), ctx); 920 break; 921 case BPF_ALU | BPF_DIV | BPF_X: 922 case BPF_ALU64 | BPF_DIV | BPF_X: 923 if (!off) 924 emit(A64_UDIV(is64, dst, dst, src), ctx); 925 else 926 emit(A64_SDIV(is64, dst, dst, src), ctx); 927 break; 928 case BPF_ALU | BPF_MOD | BPF_X: 929 case BPF_ALU64 | BPF_MOD | BPF_X: 930 if (!off) 931 emit(A64_UDIV(is64, tmp, dst, src), ctx); 932 else 933 emit(A64_SDIV(is64, tmp, dst, src), ctx); 934 emit(A64_MSUB(is64, dst, dst, tmp, src), ctx); 935 break; 936 case BPF_ALU | BPF_LSH | BPF_X: 937 case BPF_ALU64 | BPF_LSH | BPF_X: 938 emit(A64_LSLV(is64, dst, dst, src), ctx); 939 break; 940 case BPF_ALU | BPF_RSH | BPF_X: 941 case BPF_ALU64 | BPF_RSH | BPF_X: 942 emit(A64_LSRV(is64, dst, dst, src), ctx); 943 break; 944 case BPF_ALU | BPF_ARSH | BPF_X: 945 case BPF_ALU64 | BPF_ARSH | BPF_X: 946 emit(A64_ASRV(is64, dst, dst, src), ctx); 947 break; 948 /* dst = -dst */ 949 case BPF_ALU | BPF_NEG: 950 case BPF_ALU64 | BPF_NEG: 951 emit(A64_NEG(is64, dst, dst), ctx); 952 break; 953 /* dst = BSWAP##imm(dst) */ 954 case BPF_ALU | BPF_END | BPF_FROM_LE: 955 case BPF_ALU | BPF_END | BPF_FROM_BE: 956 case BPF_ALU64 | BPF_END | BPF_FROM_LE: 957 #ifdef CONFIG_CPU_BIG_ENDIAN 958 if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_BE) 959 goto emit_bswap_uxt; 960 #else /* !CONFIG_CPU_BIG_ENDIAN */ 961 if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE) 962 goto emit_bswap_uxt; 963 #endif 964 switch (imm) { 965 case 16: 966 emit(A64_REV16(is64, dst, dst), ctx); 967 /* zero-extend 16 bits into 64 bits */ 968 emit(A64_UXTH(is64, dst, dst), ctx); 969 break; 970 case 32: 971 emit(A64_REV32(0, dst, dst), ctx); 972 /* upper 32 bits already cleared */ 973 break; 974 case 64: 975 emit(A64_REV64(dst, dst), ctx); 976 break; 977 } 978 break; 979 emit_bswap_uxt: 980 switch (imm) { 981 case 16: 982 /* zero-extend 16 bits into 64 bits */ 983 emit(A64_UXTH(is64, dst, dst), ctx); 984 break; 985 case 32: 986 /* zero-extend 32 bits into 64 bits */ 987 emit(A64_UXTW(is64, dst, dst), ctx); 988 break; 989 case 64: 990 /* nop */ 991 break; 992 } 993 break; 994 /* dst = imm */ 995 case BPF_ALU | BPF_MOV | BPF_K: 996 case BPF_ALU64 | BPF_MOV | BPF_K: 997 emit_a64_mov_i(is64, dst, imm, ctx); 998 break; 999 /* dst = dst OP imm */ 1000 case BPF_ALU | BPF_ADD | BPF_K: 1001 case BPF_ALU64 | BPF_ADD | BPF_K: 1002 if (is_addsub_imm(imm)) { 1003 emit(A64_ADD_I(is64, dst, dst, imm), ctx); 1004 } else if (is_addsub_imm(-imm)) { 1005 emit(A64_SUB_I(is64, dst, dst, -imm), ctx); 1006 } else { 1007 emit_a64_mov_i(is64, tmp, imm, ctx); 1008 emit(A64_ADD(is64, dst, dst, tmp), ctx); 1009 } 1010 break; 1011 case BPF_ALU | BPF_SUB | BPF_K: 1012 case BPF_ALU64 | BPF_SUB | BPF_K: 1013 if (is_addsub_imm(imm)) { 1014 emit(A64_SUB_I(is64, dst, dst, imm), ctx); 1015 } else if (is_addsub_imm(-imm)) { 1016 emit(A64_ADD_I(is64, dst, dst, -imm), ctx); 1017 } else { 1018 emit_a64_mov_i(is64, tmp, imm, ctx); 1019 emit(A64_SUB(is64, dst, dst, tmp), ctx); 1020 } 1021 break; 1022 case BPF_ALU | BPF_AND | BPF_K: 1023 case BPF_ALU64 | BPF_AND | BPF_K: 1024 a64_insn = A64_AND_I(is64, dst, dst, imm); 1025 if (a64_insn != AARCH64_BREAK_FAULT) { 1026 emit(a64_insn, ctx); 1027 } else { 1028 
emit_a64_mov_i(is64, tmp, imm, ctx); 1029 emit(A64_AND(is64, dst, dst, tmp), ctx); 1030 } 1031 break; 1032 case BPF_ALU | BPF_OR | BPF_K: 1033 case BPF_ALU64 | BPF_OR | BPF_K: 1034 a64_insn = A64_ORR_I(is64, dst, dst, imm); 1035 if (a64_insn != AARCH64_BREAK_FAULT) { 1036 emit(a64_insn, ctx); 1037 } else { 1038 emit_a64_mov_i(is64, tmp, imm, ctx); 1039 emit(A64_ORR(is64, dst, dst, tmp), ctx); 1040 } 1041 break; 1042 case BPF_ALU | BPF_XOR | BPF_K: 1043 case BPF_ALU64 | BPF_XOR | BPF_K: 1044 a64_insn = A64_EOR_I(is64, dst, dst, imm); 1045 if (a64_insn != AARCH64_BREAK_FAULT) { 1046 emit(a64_insn, ctx); 1047 } else { 1048 emit_a64_mov_i(is64, tmp, imm, ctx); 1049 emit(A64_EOR(is64, dst, dst, tmp), ctx); 1050 } 1051 break; 1052 case BPF_ALU | BPF_MUL | BPF_K: 1053 case BPF_ALU64 | BPF_MUL | BPF_K: 1054 emit_a64_mov_i(is64, tmp, imm, ctx); 1055 emit(A64_MUL(is64, dst, dst, tmp), ctx); 1056 break; 1057 case BPF_ALU | BPF_DIV | BPF_K: 1058 case BPF_ALU64 | BPF_DIV | BPF_K: 1059 emit_a64_mov_i(is64, tmp, imm, ctx); 1060 if (!off) 1061 emit(A64_UDIV(is64, dst, dst, tmp), ctx); 1062 else 1063 emit(A64_SDIV(is64, dst, dst, tmp), ctx); 1064 break; 1065 case BPF_ALU | BPF_MOD | BPF_K: 1066 case BPF_ALU64 | BPF_MOD | BPF_K: 1067 emit_a64_mov_i(is64, tmp2, imm, ctx); 1068 if (!off) 1069 emit(A64_UDIV(is64, tmp, dst, tmp2), ctx); 1070 else 1071 emit(A64_SDIV(is64, tmp, dst, tmp2), ctx); 1072 emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx); 1073 break; 1074 case BPF_ALU | BPF_LSH | BPF_K: 1075 case BPF_ALU64 | BPF_LSH | BPF_K: 1076 emit(A64_LSL(is64, dst, dst, imm), ctx); 1077 break; 1078 case BPF_ALU | BPF_RSH | BPF_K: 1079 case BPF_ALU64 | BPF_RSH | BPF_K: 1080 emit(A64_LSR(is64, dst, dst, imm), ctx); 1081 break; 1082 case BPF_ALU | BPF_ARSH | BPF_K: 1083 case BPF_ALU64 | BPF_ARSH | BPF_K: 1084 emit(A64_ASR(is64, dst, dst, imm), ctx); 1085 break; 1086 1087 /* JUMP off */ 1088 case BPF_JMP | BPF_JA: 1089 case BPF_JMP32 | BPF_JA: 1090 if (BPF_CLASS(code) == BPF_JMP) 1091 jmp_offset = bpf2a64_offset(i, off, ctx); 1092 else 1093 jmp_offset = bpf2a64_offset(i, imm, ctx); 1094 check_imm26(jmp_offset); 1095 emit(A64_B(jmp_offset), ctx); 1096 break; 1097 /* IF (dst COND src) JUMP off */ 1098 case BPF_JMP | BPF_JEQ | BPF_X: 1099 case BPF_JMP | BPF_JGT | BPF_X: 1100 case BPF_JMP | BPF_JLT | BPF_X: 1101 case BPF_JMP | BPF_JGE | BPF_X: 1102 case BPF_JMP | BPF_JLE | BPF_X: 1103 case BPF_JMP | BPF_JNE | BPF_X: 1104 case BPF_JMP | BPF_JSGT | BPF_X: 1105 case BPF_JMP | BPF_JSLT | BPF_X: 1106 case BPF_JMP | BPF_JSGE | BPF_X: 1107 case BPF_JMP | BPF_JSLE | BPF_X: 1108 case BPF_JMP32 | BPF_JEQ | BPF_X: 1109 case BPF_JMP32 | BPF_JGT | BPF_X: 1110 case BPF_JMP32 | BPF_JLT | BPF_X: 1111 case BPF_JMP32 | BPF_JGE | BPF_X: 1112 case BPF_JMP32 | BPF_JLE | BPF_X: 1113 case BPF_JMP32 | BPF_JNE | BPF_X: 1114 case BPF_JMP32 | BPF_JSGT | BPF_X: 1115 case BPF_JMP32 | BPF_JSLT | BPF_X: 1116 case BPF_JMP32 | BPF_JSGE | BPF_X: 1117 case BPF_JMP32 | BPF_JSLE | BPF_X: 1118 emit(A64_CMP(is64, dst, src), ctx); 1119 emit_cond_jmp: 1120 jmp_offset = bpf2a64_offset(i, off, ctx); 1121 check_imm19(jmp_offset); 1122 switch (BPF_OP(code)) { 1123 case BPF_JEQ: 1124 jmp_cond = A64_COND_EQ; 1125 break; 1126 case BPF_JGT: 1127 jmp_cond = A64_COND_HI; 1128 break; 1129 case BPF_JLT: 1130 jmp_cond = A64_COND_CC; 1131 break; 1132 case BPF_JGE: 1133 jmp_cond = A64_COND_CS; 1134 break; 1135 case BPF_JLE: 1136 jmp_cond = A64_COND_LS; 1137 break; 1138 case BPF_JSET: 1139 case BPF_JNE: 1140 jmp_cond = A64_COND_NE; 1141 break; 1142 case BPF_JSGT: 1143 jmp_cond = 
A64_COND_GT; 1144 break; 1145 case BPF_JSLT: 1146 jmp_cond = A64_COND_LT; 1147 break; 1148 case BPF_JSGE: 1149 jmp_cond = A64_COND_GE; 1150 break; 1151 case BPF_JSLE: 1152 jmp_cond = A64_COND_LE; 1153 break; 1154 default: 1155 return -EFAULT; 1156 } 1157 emit(A64_B_(jmp_cond, jmp_offset), ctx); 1158 break; 1159 case BPF_JMP | BPF_JSET | BPF_X: 1160 case BPF_JMP32 | BPF_JSET | BPF_X: 1161 emit(A64_TST(is64, dst, src), ctx); 1162 goto emit_cond_jmp; 1163 /* IF (dst COND imm) JUMP off */ 1164 case BPF_JMP | BPF_JEQ | BPF_K: 1165 case BPF_JMP | BPF_JGT | BPF_K: 1166 case BPF_JMP | BPF_JLT | BPF_K: 1167 case BPF_JMP | BPF_JGE | BPF_K: 1168 case BPF_JMP | BPF_JLE | BPF_K: 1169 case BPF_JMP | BPF_JNE | BPF_K: 1170 case BPF_JMP | BPF_JSGT | BPF_K: 1171 case BPF_JMP | BPF_JSLT | BPF_K: 1172 case BPF_JMP | BPF_JSGE | BPF_K: 1173 case BPF_JMP | BPF_JSLE | BPF_K: 1174 case BPF_JMP32 | BPF_JEQ | BPF_K: 1175 case BPF_JMP32 | BPF_JGT | BPF_K: 1176 case BPF_JMP32 | BPF_JLT | BPF_K: 1177 case BPF_JMP32 | BPF_JGE | BPF_K: 1178 case BPF_JMP32 | BPF_JLE | BPF_K: 1179 case BPF_JMP32 | BPF_JNE | BPF_K: 1180 case BPF_JMP32 | BPF_JSGT | BPF_K: 1181 case BPF_JMP32 | BPF_JSLT | BPF_K: 1182 case BPF_JMP32 | BPF_JSGE | BPF_K: 1183 case BPF_JMP32 | BPF_JSLE | BPF_K: 1184 if (is_addsub_imm(imm)) { 1185 emit(A64_CMP_I(is64, dst, imm), ctx); 1186 } else if (is_addsub_imm(-imm)) { 1187 emit(A64_CMN_I(is64, dst, -imm), ctx); 1188 } else { 1189 emit_a64_mov_i(is64, tmp, imm, ctx); 1190 emit(A64_CMP(is64, dst, tmp), ctx); 1191 } 1192 goto emit_cond_jmp; 1193 case BPF_JMP | BPF_JSET | BPF_K: 1194 case BPF_JMP32 | BPF_JSET | BPF_K: 1195 a64_insn = A64_TST_I(is64, dst, imm); 1196 if (a64_insn != AARCH64_BREAK_FAULT) { 1197 emit(a64_insn, ctx); 1198 } else { 1199 emit_a64_mov_i(is64, tmp, imm, ctx); 1200 emit(A64_TST(is64, dst, tmp), ctx); 1201 } 1202 goto emit_cond_jmp; 1203 /* function call */ 1204 case BPF_JMP | BPF_CALL: 1205 { 1206 const u8 r0 = bpf2a64[BPF_REG_0]; 1207 bool func_addr_fixed; 1208 u64 func_addr; 1209 1210 ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, 1211 &func_addr, &func_addr_fixed); 1212 if (ret < 0) 1213 return ret; 1214 emit_call(func_addr, ctx); 1215 emit(A64_MOV(1, r0, A64_R(0)), ctx); 1216 break; 1217 } 1218 /* tail call */ 1219 case BPF_JMP | BPF_TAIL_CALL: 1220 if (emit_bpf_tail_call(ctx)) 1221 return -EFAULT; 1222 break; 1223 /* function return */ 1224 case BPF_JMP | BPF_EXIT: 1225 /* Optimization: when last instruction is EXIT, 1226 simply fallthrough to epilogue. 
*/ 1227 if (i == ctx->prog->len - 1) 1228 break; 1229 jmp_offset = epilogue_offset(ctx); 1230 check_imm26(jmp_offset); 1231 emit(A64_B(jmp_offset), ctx); 1232 break; 1233 1234 /* dst = imm64 */ 1235 case BPF_LD | BPF_IMM | BPF_DW: 1236 { 1237 const struct bpf_insn insn1 = insn[1]; 1238 u64 imm64; 1239 1240 imm64 = (u64)insn1.imm << 32 | (u32)imm; 1241 if (bpf_pseudo_func(insn)) 1242 emit_addr_mov_i64(dst, imm64, ctx); 1243 else 1244 emit_a64_mov_i64(dst, imm64, ctx); 1245 1246 return 1; 1247 } 1248 1249 /* LDX: dst = (u64)*(unsigned size *)(src + off) */ 1250 case BPF_LDX | BPF_MEM | BPF_W: 1251 case BPF_LDX | BPF_MEM | BPF_H: 1252 case BPF_LDX | BPF_MEM | BPF_B: 1253 case BPF_LDX | BPF_MEM | BPF_DW: 1254 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1255 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1256 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1257 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1258 /* LDXS: dst_reg = (s64)*(signed size *)(src_reg + off) */ 1259 case BPF_LDX | BPF_MEMSX | BPF_B: 1260 case BPF_LDX | BPF_MEMSX | BPF_H: 1261 case BPF_LDX | BPF_MEMSX | BPF_W: 1262 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: 1263 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: 1264 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: 1265 case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: 1266 case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: 1267 case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: 1268 case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: 1269 if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { 1270 emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx); 1271 src = tmp2; 1272 } 1273 if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) { 1274 src_adj = fpb; 1275 off_adj = off + ctx->fpb_offset; 1276 } else { 1277 src_adj = src; 1278 off_adj = off; 1279 } 1280 sign_extend = (BPF_MODE(insn->code) == BPF_MEMSX || 1281 BPF_MODE(insn->code) == BPF_PROBE_MEMSX); 1282 switch (BPF_SIZE(code)) { 1283 case BPF_W: 1284 if (is_lsi_offset(off_adj, 2)) { 1285 if (sign_extend) 1286 emit(A64_LDRSWI(dst, src_adj, off_adj), ctx); 1287 else 1288 emit(A64_LDR32I(dst, src_adj, off_adj), ctx); 1289 } else { 1290 emit_a64_mov_i(1, tmp, off, ctx); 1291 if (sign_extend) 1292 emit(A64_LDRSW(dst, src, tmp), ctx); 1293 else 1294 emit(A64_LDR32(dst, src, tmp), ctx); 1295 } 1296 break; 1297 case BPF_H: 1298 if (is_lsi_offset(off_adj, 1)) { 1299 if (sign_extend) 1300 emit(A64_LDRSHI(dst, src_adj, off_adj), ctx); 1301 else 1302 emit(A64_LDRHI(dst, src_adj, off_adj), ctx); 1303 } else { 1304 emit_a64_mov_i(1, tmp, off, ctx); 1305 if (sign_extend) 1306 emit(A64_LDRSH(dst, src, tmp), ctx); 1307 else 1308 emit(A64_LDRH(dst, src, tmp), ctx); 1309 } 1310 break; 1311 case BPF_B: 1312 if (is_lsi_offset(off_adj, 0)) { 1313 if (sign_extend) 1314 emit(A64_LDRSBI(dst, src_adj, off_adj), ctx); 1315 else 1316 emit(A64_LDRBI(dst, src_adj, off_adj), ctx); 1317 } else { 1318 emit_a64_mov_i(1, tmp, off, ctx); 1319 if (sign_extend) 1320 emit(A64_LDRSB(dst, src, tmp), ctx); 1321 else 1322 emit(A64_LDRB(dst, src, tmp), ctx); 1323 } 1324 break; 1325 case BPF_DW: 1326 if (is_lsi_offset(off_adj, 3)) { 1327 emit(A64_LDR64I(dst, src_adj, off_adj), ctx); 1328 } else { 1329 emit_a64_mov_i(1, tmp, off, ctx); 1330 emit(A64_LDR64(dst, src, tmp), ctx); 1331 } 1332 break; 1333 } 1334 1335 ret = add_exception_handler(insn, ctx, dst); 1336 if (ret) 1337 return ret; 1338 break; 1339 1340 /* speculation barrier */ 1341 case BPF_ST | BPF_NOSPEC: 1342 /* 1343 * Nothing required here. 
1344 * 1345 * In case of arm64, we rely on the firmware mitigation of 1346 * Speculative Store Bypass as controlled via the ssbd kernel 1347 * parameter. Whenever the mitigation is enabled, it works 1348 * for all of the kernel code with no need to provide any 1349 * additional instructions. 1350 */ 1351 break; 1352 1353 /* ST: *(size *)(dst + off) = imm */ 1354 case BPF_ST | BPF_MEM | BPF_W: 1355 case BPF_ST | BPF_MEM | BPF_H: 1356 case BPF_ST | BPF_MEM | BPF_B: 1357 case BPF_ST | BPF_MEM | BPF_DW: 1358 case BPF_ST | BPF_PROBE_MEM32 | BPF_B: 1359 case BPF_ST | BPF_PROBE_MEM32 | BPF_H: 1360 case BPF_ST | BPF_PROBE_MEM32 | BPF_W: 1361 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: 1362 if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { 1363 emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx); 1364 dst = tmp2; 1365 } 1366 if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) { 1367 dst_adj = fpb; 1368 off_adj = off + ctx->fpb_offset; 1369 } else { 1370 dst_adj = dst; 1371 off_adj = off; 1372 } 1373 /* Load imm to a register then store it */ 1374 emit_a64_mov_i(1, tmp, imm, ctx); 1375 switch (BPF_SIZE(code)) { 1376 case BPF_W: 1377 if (is_lsi_offset(off_adj, 2)) { 1378 emit(A64_STR32I(tmp, dst_adj, off_adj), ctx); 1379 } else { 1380 emit_a64_mov_i(1, tmp2, off, ctx); 1381 emit(A64_STR32(tmp, dst, tmp2), ctx); 1382 } 1383 break; 1384 case BPF_H: 1385 if (is_lsi_offset(off_adj, 1)) { 1386 emit(A64_STRHI(tmp, dst_adj, off_adj), ctx); 1387 } else { 1388 emit_a64_mov_i(1, tmp2, off, ctx); 1389 emit(A64_STRH(tmp, dst, tmp2), ctx); 1390 } 1391 break; 1392 case BPF_B: 1393 if (is_lsi_offset(off_adj, 0)) { 1394 emit(A64_STRBI(tmp, dst_adj, off_adj), ctx); 1395 } else { 1396 emit_a64_mov_i(1, tmp2, off, ctx); 1397 emit(A64_STRB(tmp, dst, tmp2), ctx); 1398 } 1399 break; 1400 case BPF_DW: 1401 if (is_lsi_offset(off_adj, 3)) { 1402 emit(A64_STR64I(tmp, dst_adj, off_adj), ctx); 1403 } else { 1404 emit_a64_mov_i(1, tmp2, off, ctx); 1405 emit(A64_STR64(tmp, dst, tmp2), ctx); 1406 } 1407 break; 1408 } 1409 1410 ret = add_exception_handler(insn, ctx, dst); 1411 if (ret) 1412 return ret; 1413 break; 1414 1415 /* STX: *(size *)(dst + off) = src */ 1416 case BPF_STX | BPF_MEM | BPF_W: 1417 case BPF_STX | BPF_MEM | BPF_H: 1418 case BPF_STX | BPF_MEM | BPF_B: 1419 case BPF_STX | BPF_MEM | BPF_DW: 1420 case BPF_STX | BPF_PROBE_MEM32 | BPF_B: 1421 case BPF_STX | BPF_PROBE_MEM32 | BPF_H: 1422 case BPF_STX | BPF_PROBE_MEM32 | BPF_W: 1423 case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: 1424 if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { 1425 emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx); 1426 dst = tmp2; 1427 } 1428 if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) { 1429 dst_adj = fpb; 1430 off_adj = off + ctx->fpb_offset; 1431 } else { 1432 dst_adj = dst; 1433 off_adj = off; 1434 } 1435 switch (BPF_SIZE(code)) { 1436 case BPF_W: 1437 if (is_lsi_offset(off_adj, 2)) { 1438 emit(A64_STR32I(src, dst_adj, off_adj), ctx); 1439 } else { 1440 emit_a64_mov_i(1, tmp, off, ctx); 1441 emit(A64_STR32(src, dst, tmp), ctx); 1442 } 1443 break; 1444 case BPF_H: 1445 if (is_lsi_offset(off_adj, 1)) { 1446 emit(A64_STRHI(src, dst_adj, off_adj), ctx); 1447 } else { 1448 emit_a64_mov_i(1, tmp, off, ctx); 1449 emit(A64_STRH(src, dst, tmp), ctx); 1450 } 1451 break; 1452 case BPF_B: 1453 if (is_lsi_offset(off_adj, 0)) { 1454 emit(A64_STRBI(src, dst_adj, off_adj), ctx); 1455 } else { 1456 emit_a64_mov_i(1, tmp, off, ctx); 1457 emit(A64_STRB(src, dst, tmp), ctx); 1458 } 1459 break; 1460 case BPF_DW: 1461 if 
(is_lsi_offset(off_adj, 3)) { 1462 emit(A64_STR64I(src, dst_adj, off_adj), ctx); 1463 } else { 1464 emit_a64_mov_i(1, tmp, off, ctx); 1465 emit(A64_STR64(src, dst, tmp), ctx); 1466 } 1467 break; 1468 } 1469 1470 ret = add_exception_handler(insn, ctx, dst); 1471 if (ret) 1472 return ret; 1473 break; 1474 1475 case BPF_STX | BPF_ATOMIC | BPF_W: 1476 case BPF_STX | BPF_ATOMIC | BPF_DW: 1477 if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) 1478 ret = emit_lse_atomic(insn, ctx); 1479 else 1480 ret = emit_ll_sc_atomic(insn, ctx); 1481 if (ret) 1482 return ret; 1483 break; 1484 1485 default: 1486 pr_err_once("unknown opcode %02x\n", code); 1487 return -EINVAL; 1488 } 1489 1490 return 0; 1491 } 1492 1493 /* 1494 * Return 0 if FP may change at runtime, otherwise find the minimum negative 1495 * offset to FP, converts it to positive number, and align down to 8 bytes. 1496 */ 1497 static int find_fpb_offset(struct bpf_prog *prog) 1498 { 1499 int i; 1500 int offset = 0; 1501 1502 for (i = 0; i < prog->len; i++) { 1503 const struct bpf_insn *insn = &prog->insnsi[i]; 1504 const u8 class = BPF_CLASS(insn->code); 1505 const u8 mode = BPF_MODE(insn->code); 1506 const u8 src = insn->src_reg; 1507 const u8 dst = insn->dst_reg; 1508 const s32 imm = insn->imm; 1509 const s16 off = insn->off; 1510 1511 switch (class) { 1512 case BPF_STX: 1513 case BPF_ST: 1514 /* fp holds atomic operation result */ 1515 if (class == BPF_STX && mode == BPF_ATOMIC && 1516 ((imm == BPF_XCHG || 1517 imm == (BPF_FETCH | BPF_ADD) || 1518 imm == (BPF_FETCH | BPF_AND) || 1519 imm == (BPF_FETCH | BPF_XOR) || 1520 imm == (BPF_FETCH | BPF_OR)) && 1521 src == BPF_REG_FP)) 1522 return 0; 1523 1524 if (mode == BPF_MEM && dst == BPF_REG_FP && 1525 off < offset) 1526 offset = insn->off; 1527 break; 1528 1529 case BPF_JMP32: 1530 case BPF_JMP: 1531 break; 1532 1533 case BPF_LDX: 1534 case BPF_LD: 1535 /* fp holds load result */ 1536 if (dst == BPF_REG_FP) 1537 return 0; 1538 1539 if (class == BPF_LDX && mode == BPF_MEM && 1540 src == BPF_REG_FP && off < offset) 1541 offset = off; 1542 break; 1543 1544 case BPF_ALU: 1545 case BPF_ALU64: 1546 default: 1547 /* fp holds ALU result */ 1548 if (dst == BPF_REG_FP) 1549 return 0; 1550 } 1551 } 1552 1553 if (offset < 0) { 1554 /* 1555 * safely be converted to a positive 'int', since insn->off 1556 * is 's16' 1557 */ 1558 offset = -offset; 1559 /* align down to 8 bytes */ 1560 offset = ALIGN_DOWN(offset, 8); 1561 } 1562 1563 return offset; 1564 } 1565 1566 static int build_body(struct jit_ctx *ctx, bool extra_pass) 1567 { 1568 const struct bpf_prog *prog = ctx->prog; 1569 int i; 1570 1571 /* 1572 * - offset[0] offset of the end of prologue, 1573 * start of the 1st instruction. 1574 * - offset[1] - offset of the end of 1st instruction, 1575 * start of the 2nd instruction 1576 * [....] 
1577 * - offset[3] - offset of the end of 3rd instruction, 1578 * start of 4th instruction 1579 */ 1580 for (i = 0; i < prog->len; i++) { 1581 const struct bpf_insn *insn = &prog->insnsi[i]; 1582 int ret; 1583 1584 if (ctx->image == NULL) 1585 ctx->offset[i] = ctx->idx; 1586 ret = build_insn(insn, ctx, extra_pass); 1587 if (ret > 0) { 1588 i++; 1589 if (ctx->image == NULL) 1590 ctx->offset[i] = ctx->idx; 1591 continue; 1592 } 1593 if (ret) 1594 return ret; 1595 } 1596 /* 1597 * offset is allocated with prog->len + 1 so fill in 1598 * the last element with the offset after the last 1599 * instruction (end of program) 1600 */ 1601 if (ctx->image == NULL) 1602 ctx->offset[i] = ctx->idx; 1603 1604 return 0; 1605 } 1606 1607 static int validate_code(struct jit_ctx *ctx) 1608 { 1609 int i; 1610 1611 for (i = 0; i < ctx->idx; i++) { 1612 u32 a64_insn = le32_to_cpu(ctx->image[i]); 1613 1614 if (a64_insn == AARCH64_BREAK_FAULT) 1615 return -1; 1616 } 1617 return 0; 1618 } 1619 1620 static int validate_ctx(struct jit_ctx *ctx) 1621 { 1622 if (validate_code(ctx)) 1623 return -1; 1624 1625 if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries)) 1626 return -1; 1627 1628 return 0; 1629 } 1630 1631 static inline void bpf_flush_icache(void *start, void *end) 1632 { 1633 flush_icache_range((unsigned long)start, (unsigned long)end); 1634 } 1635 1636 struct arm64_jit_data { 1637 struct bpf_binary_header *header; 1638 u8 *ro_image; 1639 struct bpf_binary_header *ro_header; 1640 struct jit_ctx ctx; 1641 }; 1642 1643 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 1644 { 1645 int image_size, prog_size, extable_size, extable_align, extable_offset; 1646 struct bpf_prog *tmp, *orig_prog = prog; 1647 struct bpf_binary_header *header; 1648 struct bpf_binary_header *ro_header; 1649 struct arm64_jit_data *jit_data; 1650 bool was_classic = bpf_prog_was_classic(prog); 1651 bool tmp_blinded = false; 1652 bool extra_pass = false; 1653 struct jit_ctx ctx; 1654 u64 arena_vm_start; 1655 u8 *image_ptr; 1656 u8 *ro_image_ptr; 1657 1658 if (!prog->jit_requested) 1659 return orig_prog; 1660 1661 tmp = bpf_jit_blind_constants(prog); 1662 /* If blinding was requested and we failed during blinding, 1663 * we must fall back to the interpreter. 1664 */ 1665 if (IS_ERR(tmp)) 1666 return orig_prog; 1667 if (tmp != prog) { 1668 tmp_blinded = true; 1669 prog = tmp; 1670 } 1671 1672 arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena); 1673 jit_data = prog->aux->jit_data; 1674 if (!jit_data) { 1675 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 1676 if (!jit_data) { 1677 prog = orig_prog; 1678 goto out; 1679 } 1680 prog->aux->jit_data = jit_data; 1681 } 1682 if (jit_data->ctx.offset) { 1683 ctx = jit_data->ctx; 1684 ro_image_ptr = jit_data->ro_image; 1685 ro_header = jit_data->ro_header; 1686 header = jit_data->header; 1687 image_ptr = (void *)header + ((void *)ro_image_ptr 1688 - (void *)ro_header); 1689 extra_pass = true; 1690 prog_size = sizeof(u32) * ctx.idx; 1691 goto skip_init_ctx; 1692 } 1693 memset(&ctx, 0, sizeof(ctx)); 1694 ctx.prog = prog; 1695 1696 ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL); 1697 if (ctx.offset == NULL) { 1698 prog = orig_prog; 1699 goto out_off; 1700 } 1701 1702 ctx.fpb_offset = find_fpb_offset(prog); 1703 ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena); 1704 1705 /* 1706 * 1. Initial fake pass to compute ctx->idx and ctx->offset. 
1707 * 1708 * BPF line info needs ctx->offset[i] to be the offset of 1709 * instruction[i] in jited image, so build prologue first. 1710 */ 1711 if (build_prologue(&ctx, was_classic, prog->aux->exception_cb, 1712 arena_vm_start)) { 1713 prog = orig_prog; 1714 goto out_off; 1715 } 1716 1717 if (build_body(&ctx, extra_pass)) { 1718 prog = orig_prog; 1719 goto out_off; 1720 } 1721 1722 ctx.epilogue_offset = ctx.idx; 1723 build_epilogue(&ctx, prog->aux->exception_cb); 1724 build_plt(&ctx); 1725 1726 extable_align = __alignof__(struct exception_table_entry); 1727 extable_size = prog->aux->num_exentries * 1728 sizeof(struct exception_table_entry); 1729 1730 /* Now we know the actual image size. */ 1731 prog_size = sizeof(u32) * ctx.idx; 1732 /* also allocate space for plt target */ 1733 extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align); 1734 image_size = extable_offset + extable_size; 1735 ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr, 1736 sizeof(u32), &header, &image_ptr, 1737 jit_fill_hole); 1738 if (!ro_header) { 1739 prog = orig_prog; 1740 goto out_off; 1741 } 1742 1743 /* 2. Now, the actual pass. */ 1744 1745 /* 1746 * Use the image(RW) for writing the JITed instructions. But also save 1747 * the ro_image(RX) for calculating the offsets in the image. The RW 1748 * image will be later copied to the RX image from where the program 1749 * will run. The bpf_jit_binary_pack_finalize() will do this copy in the 1750 * final step. 1751 */ 1752 ctx.image = (__le32 *)image_ptr; 1753 ctx.ro_image = (__le32 *)ro_image_ptr; 1754 if (extable_size) 1755 prog->aux->extable = (void *)ro_image_ptr + extable_offset; 1756 skip_init_ctx: 1757 ctx.idx = 0; 1758 ctx.exentry_idx = 0; 1759 1760 build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start); 1761 1762 if (build_body(&ctx, extra_pass)) { 1763 prog = orig_prog; 1764 goto out_free_hdr; 1765 } 1766 1767 build_epilogue(&ctx, prog->aux->exception_cb); 1768 build_plt(&ctx); 1769 1770 /* 3. Extra pass to validate JITed code. */ 1771 if (validate_ctx(&ctx)) { 1772 prog = orig_prog; 1773 goto out_free_hdr; 1774 } 1775 1776 /* And we're done. */ 1777 if (bpf_jit_enable > 1) 1778 bpf_jit_dump(prog->len, prog_size, 2, ctx.image); 1779 1780 if (!prog->is_func || extra_pass) { 1781 if (extra_pass && ctx.idx != jit_data->ctx.idx) { 1782 pr_err_once("multi-func JIT bug %d != %d\n", 1783 ctx.idx, jit_data->ctx.idx); 1784 prog->bpf_func = NULL; 1785 prog->jited = 0; 1786 prog->jited_len = 0; 1787 goto out_free_hdr; 1788 } 1789 if (WARN_ON(bpf_jit_binary_pack_finalize(prog, ro_header, 1790 header))) { 1791 /* ro_header has been freed */ 1792 ro_header = NULL; 1793 prog = orig_prog; 1794 goto out_off; 1795 } 1796 /* 1797 * The instructions have now been copied to the ROX region from 1798 * where they will execute. Now the data cache has to be cleaned to 1799 * the PoU and the I-cache has to be invalidated for the VAs. 
1800 */ 1801 bpf_flush_icache(ro_header, ctx.ro_image + ctx.idx); 1802 } else { 1803 jit_data->ctx = ctx; 1804 jit_data->ro_image = ro_image_ptr; 1805 jit_data->header = header; 1806 jit_data->ro_header = ro_header; 1807 } 1808 1809 prog->bpf_func = (void *)ctx.ro_image; 1810 prog->jited = 1; 1811 prog->jited_len = prog_size; 1812 1813 if (!prog->is_func || extra_pass) { 1814 int i; 1815 1816 /* offset[prog->len] is the size of program */ 1817 for (i = 0; i <= prog->len; i++) 1818 ctx.offset[i] *= AARCH64_INSN_SIZE; 1819 bpf_prog_fill_jited_linfo(prog, ctx.offset + 1); 1820 out_off: 1821 kvfree(ctx.offset); 1822 kfree(jit_data); 1823 prog->aux->jit_data = NULL; 1824 } 1825 out: 1826 if (tmp_blinded) 1827 bpf_jit_prog_release_other(prog, prog == orig_prog ? 1828 tmp : orig_prog); 1829 return prog; 1830 1831 out_free_hdr: 1832 if (header) { 1833 bpf_arch_text_copy(&ro_header->size, &header->size, 1834 sizeof(header->size)); 1835 bpf_jit_binary_pack_free(ro_header, header); 1836 } 1837 goto out_off; 1838 } 1839 1840 bool bpf_jit_supports_kfunc_call(void) 1841 { 1842 return true; 1843 } 1844 1845 void *bpf_arch_text_copy(void *dst, void *src, size_t len) 1846 { 1847 if (!aarch64_insn_copy(dst, src, len)) 1848 return ERR_PTR(-EINVAL); 1849 return dst; 1850 } 1851 1852 u64 bpf_jit_alloc_exec_limit(void) 1853 { 1854 return VMALLOC_END - VMALLOC_START; 1855 } 1856 1857 void *bpf_jit_alloc_exec(unsigned long size) 1858 { 1859 /* Memory is intended to be executable, reset the pointer tag. */ 1860 return kasan_reset_tag(vmalloc(size)); 1861 } 1862 1863 void bpf_jit_free_exec(void *addr) 1864 { 1865 return vfree(addr); 1866 } 1867 1868 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */ 1869 bool bpf_jit_supports_subprog_tailcalls(void) 1870 { 1871 return true; 1872 } 1873 1874 static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, 1875 int args_off, int retval_off, int run_ctx_off, 1876 bool save_ret) 1877 { 1878 __le32 *branch; 1879 u64 enter_prog; 1880 u64 exit_prog; 1881 struct bpf_prog *p = l->link.prog; 1882 int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); 1883 1884 enter_prog = (u64)bpf_trampoline_enter(p); 1885 exit_prog = (u64)bpf_trampoline_exit(p); 1886 1887 if (l->cookie == 0) { 1888 /* if cookie is zero, one instruction is enough to store it */ 1889 emit(A64_STR64I(A64_ZR, A64_SP, run_ctx_off + cookie_off), ctx); 1890 } else { 1891 emit_a64_mov_i64(A64_R(10), l->cookie, ctx); 1892 emit(A64_STR64I(A64_R(10), A64_SP, run_ctx_off + cookie_off), 1893 ctx); 1894 } 1895 1896 /* save p to callee saved register x19 to avoid loading p with mov_i64 1897 * each time. 
1898 */ 1899 emit_addr_mov_i64(A64_R(19), (const u64)p, ctx); 1900 1901 /* arg1: prog */ 1902 emit(A64_MOV(1, A64_R(0), A64_R(19)), ctx); 1903 /* arg2: &run_ctx */ 1904 emit(A64_ADD_I(1, A64_R(1), A64_SP, run_ctx_off), ctx); 1905 1906 emit_call(enter_prog, ctx); 1907 1908 /* save return value to callee saved register x20 */ 1909 emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx); 1910 1911 /* if (__bpf_prog_enter(prog) == 0) 1912 * goto skip_exec_of_prog; 1913 */ 1914 branch = ctx->image + ctx->idx; 1915 emit(A64_NOP, ctx); 1916 1917 emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx); 1918 if (!p->jited) 1919 emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx); 1920 1921 emit_call((const u64)p->bpf_func, ctx); 1922 1923 if (save_ret) 1924 emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx); 1925 1926 if (ctx->image) { 1927 int offset = &ctx->image[ctx->idx] - branch; 1928 *branch = cpu_to_le32(A64_CBZ(1, A64_R(0), offset)); 1929 } 1930 1931 /* arg1: prog */ 1932 emit(A64_MOV(1, A64_R(0), A64_R(19)), ctx); 1933 /* arg2: start time */ 1934 emit(A64_MOV(1, A64_R(1), A64_R(20)), ctx); 1935 /* arg3: &run_ctx */ 1936 emit(A64_ADD_I(1, A64_R(2), A64_SP, run_ctx_off), ctx); 1937 1938 emit_call(exit_prog, ctx); 1939 } 1940 1941 static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, 1942 int args_off, int retval_off, int run_ctx_off, 1943 __le32 **branches) 1944 { 1945 int i; 1946 1947 /* The first fmod_ret program will receive a garbage return value. 1948 * Set this to 0 to avoid confusing the program. 1949 */ 1950 emit(A64_STR64I(A64_ZR, A64_SP, retval_off), ctx); 1951 for (i = 0; i < tl->nr_links; i++) { 1952 invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, 1953 run_ctx_off, true); 1954 /* if (*(u64 *)(sp + retval_off) != 0) 1955 * goto do_fexit; 1956 */ 1957 emit(A64_LDR64I(A64_R(10), A64_SP, retval_off), ctx); 1958 /* Save the location of branch, and generate a nop. 1959 * This nop will be replaced with a cbnz later. 1960 */ 1961 branches[i] = ctx->image + ctx->idx; 1962 emit(A64_NOP, ctx); 1963 } 1964 } 1965 1966 static void save_args(struct jit_ctx *ctx, int args_off, int nregs) 1967 { 1968 int i; 1969 1970 for (i = 0; i < nregs; i++) { 1971 emit(A64_STR64I(i, A64_SP, args_off), ctx); 1972 args_off += 8; 1973 } 1974 } 1975 1976 static void restore_args(struct jit_ctx *ctx, int args_off, int nregs) 1977 { 1978 int i; 1979 1980 for (i = 0; i < nregs; i++) { 1981 emit(A64_LDR64I(i, A64_SP, args_off), ctx); 1982 args_off += 8; 1983 } 1984 } 1985 1986 /* Based on the x86's implementation of arch_prepare_bpf_trampoline(). 
/* Based on the x86's implementation of arch_prepare_bpf_trampoline().
 *
 * bpf prog and function entry before bpf trampoline hooked:
 *   mov x9, lr
 *   nop
 *
 * bpf prog and function entry after bpf trampoline hooked:
 *   mov x9, lr
 *   bl  <bpf_trampoline or plt>
 *
 */
static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
			      struct bpf_tramp_links *tlinks, void *func_addr,
			      int nregs, u32 flags)
{
	int i;
	int stack_size;
	int retaddr_off;
	int regs_off;
	int retval_off;
	int args_off;
	int nregs_off;
	int ip_off;
	int run_ctx_off;
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	bool save_ret;
	__le32 **branches = NULL;

	/* trampoline stack layout:
	 *                  [ parent ip         ]
	 *                  [ FP                ]
	 * SP + retaddr_off [ self ip           ]
	 *                  [ FP                ]
	 *
	 *                  [ padding           ] align SP to multiples of 16
	 *
	 *                  [ x20               ] callee saved reg x20
	 * SP + regs_off    [ x19               ] callee saved reg x19
	 *
	 * SP + retval_off  [ return value      ] BPF_TRAMP_F_CALL_ORIG or
	 *                                        BPF_TRAMP_F_RET_FENTRY_RET
	 *
	 *                  [ arg reg N         ]
	 *                  [ ...               ]
	 * SP + args_off    [ arg reg 1         ]
	 *
	 * SP + nregs_off   [ arg regs count    ]
	 *
	 * SP + ip_off      [ traced function   ] BPF_TRAMP_F_IP_ARG flag
	 *
	 * SP + run_ctx_off [ bpf_tramp_run_ctx ]
	 */

	stack_size = 0;
	run_ctx_off = stack_size;
	/* room for bpf_tramp_run_ctx */
	stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);

	ip_off = stack_size;
	/* room for IP address argument */
	if (flags & BPF_TRAMP_F_IP_ARG)
		stack_size += 8;

	nregs_off = stack_size;
	/* room for args count */
	stack_size += 8;

	args_off = stack_size;
	/* room for args */
	stack_size += nregs * 8;

	/* room for return value */
	retval_off = stack_size;
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	/* room for callee saved registers, currently x19 and x20 are used */
	regs_off = stack_size;
	stack_size += 16;

	/* round up to multiples of 16 to avoid SPAlignmentFault */
	stack_size = round_up(stack_size, 16);

	/* the return address is located just above the saved FP */
	retaddr_off = stack_size + 8;
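	/* For illustration, with nregs == 2, no BPF_TRAMP_F_IP_ARG and
	 * save_ret == true, and assuming round_up(sizeof(struct
	 * bpf_tramp_run_ctx), 8) == 24, the offsets work out to:
	 *
	 *   run_ctx_off = 0, nregs_off = 24, args_off = 32, retval_off = 48,
	 *   regs_off = 56, stack_size = round_up(72, 16) = 80, retaddr_off = 88
	 *
	 * (the exact numbers depend on sizeof(struct bpf_tramp_run_ctx)).
	 */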
	/* bpf trampoline may be invoked by 3 instruction types:
	 * 1. bl, attached to bpf prog or kernel function via short jump
	 * 2. br, attached to bpf prog or kernel function via long jump
	 * 3. blr, working as a function pointer, used by struct_ops.
	 * So BTI_JC should be used here to support both br and blr.
	 */
	emit_bti(A64_BTI_JC, ctx);

	/* frame for parent function */
	emit(A64_PUSH(A64_FP, A64_R(9), A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* frame for patched function */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* allocate stack space */
	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* save ip address of the traced function */
		emit_addr_mov_i64(A64_R(10), (const u64)func_addr, ctx);
		emit(A64_STR64I(A64_R(10), A64_SP, ip_off), ctx);
	}

	/* save arg regs count */
	emit(A64_MOVZ(1, A64_R(10), nregs, 0), ctx);
	emit(A64_STR64I(A64_R(10), A64_SP, nregs_off), ctx);

	/* save arg regs */
	save_args(ctx, args_off, nregs);

	/* save callee saved registers */
	emit(A64_STR64I(A64_R(19), A64_SP, regs_off), ctx);
	emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
		emit_call((const u64)__bpf_tramp_enter, ctx);
	}

	for (i = 0; i < fentry->nr_links; i++)
		invoke_bpf_prog(ctx, fentry->links[i], args_off,
				retval_off, run_ctx_off,
				flags & BPF_TRAMP_F_RET_FENTRY_RET);

	if (fmod_ret->nr_links) {
		branches = kcalloc(fmod_ret->nr_links, sizeof(__le32 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off,
				   run_ctx_off, branches);
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_args(ctx, args_off, nregs);
		/* call original func: set lr to the instruction following the
		 * ret below, so the patched function returns back into the
		 * trampoline
		 */
		emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
		emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
		emit(A64_RET(A64_R(10)), ctx);
		/* store return value */
		emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
		/* reserve a nop for bpf_tramp_image_put */
		im->ip_after_call = ctx->ro_image + ctx->idx;
		emit(A64_NOP, ctx);
	}

	/* update the branches saved in invoke_bpf_mod_ret with cbnz */
	for (i = 0; i < fmod_ret->nr_links && ctx->image != NULL; i++) {
		int offset = &ctx->image[ctx->idx] - branches[i];
		*branches[i] = cpu_to_le32(A64_CBNZ(1, A64_R(10), offset));
	}

	for (i = 0; i < fexit->nr_links; i++)
		invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off,
				run_ctx_off, false);

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = ctx->ro_image + ctx->idx;
		emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
		emit_call((const u64)__bpf_tramp_exit, ctx);
	}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_args(ctx, args_off, nregs);

	/* restore callee saved registers x19 and x20 */
	emit(A64_LDR64I(A64_R(19), A64_SP, regs_off), ctx);
	emit(A64_LDR64I(A64_R(20), A64_SP, regs_off + 8), ctx);

	if (save_ret)
		emit(A64_LDR64I(A64_R(0), A64_SP, retval_off), ctx);

	/* reset SP */
	emit(A64_MOV(1, A64_SP, A64_FP), ctx);

	/* pop frames */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_POP(A64_FP, A64_R(9), A64_SP), ctx);

	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip patched function, return to parent */
		emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
		emit(A64_RET(A64_R(9)), ctx);
	} else {
		/* return to patched function */
		emit(A64_MOV(1, A64_R(10), A64_LR), ctx);
		emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
		emit(A64_RET(A64_R(10)), ctx);
	}

	kfree(branches);

	return ctx->idx;
}

static int btf_func_model_nregs(const struct btf_func_model *m)
{
	int nregs = m->nr_args;
	int i;

	/* extra registers needed for struct argument */
	for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
		/* The arg_size is at most 16 bytes, enforced by the verifier. */
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
			nregs += (m->arg_size[i] + 7) / 8 - 1;
	}

	return nregs;
}

int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr)
{
	struct jit_ctx ctx = {
		.image = NULL,
		.idx = 0,
	};
	struct bpf_tramp_image im;
	int nregs, ret;

	nregs = btf_func_model_nregs(m);
	/* the first 8 registers are used for arguments */
	if (nregs > 8)
		return -ENOTSUPP;

	ret = prepare_trampoline(&ctx, &im, tlinks, func_addr, nregs, flags);
	if (ret < 0)
		return ret;

	/* convert the instruction count into a size in bytes */
	return ret * AARCH64_INSN_SIZE;
}

void *arch_alloc_bpf_trampoline(unsigned int size)
{
	return bpf_prog_pack_alloc(size, jit_fill_hole);
}

void arch_free_bpf_trampoline(void *image, unsigned int size)
{
	bpf_prog_pack_free(image, size);
}

int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
	return 0;
}

int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
				void *ro_image_end, const struct btf_func_model *m,
				u32 flags, struct bpf_tramp_links *tlinks,
				void *func_addr)
{
	int ret, nregs;
	void *image, *tmp;
	u32 size = ro_image_end - ro_image;

	/* image doesn't need to be in module memory range, so we can
	 * use kvmalloc.
	 */
	image = kvmalloc(size, GFP_KERNEL);
	if (!image)
		return -ENOMEM;

	struct jit_ctx ctx = {
		.image = image,
		.ro_image = ro_image,
		.idx = 0,
	};

	nregs = btf_func_model_nregs(m);
	/* the first 8 registers are used for arguments */
	if (nregs > 8) {
		/* don't leak the writable image on error */
		ret = -ENOTSUPP;
		goto out;
	}

	jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
	ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags);

	if (ret > 0 && validate_code(&ctx) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (ret > 0)
		ret *= AARCH64_INSN_SIZE;

	tmp = bpf_arch_text_copy(ro_image, image, size);
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out;
	}

	bpf_flush_icache(ro_image, ro_image + size);
out:
	kvfree(image);
	return ret;
}

static bool is_long_jump(void *ip, void *target)
{
	long offset;

	/* NULL target means this is a NOP */
	if (!target)
		return false;

	offset = (long)target - (long)ip;
	return offset < -SZ_128M || offset >= SZ_128M;
}

static int gen_branch_or_nop(enum aarch64_insn_branch_type type, void *ip,
			     void *addr, void *plt, u32 *insn)
{
	void *target;

	if (!addr) {
		*insn = aarch64_insn_gen_nop();
		return 0;
	}

	if (is_long_jump(ip, addr))
		target = plt;
	else
		target = addr;

	*insn = aarch64_insn_gen_branch_imm((unsigned long)ip,
					    (unsigned long)target,
					    type);

	return *insn != AARCH64_BREAK_FAULT ? 0 : -EFAULT;
}
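/* For illustration: with a patchsite at @ip, gen_branch_or_nop() yields
 *   - a NOP          if @addr is NULL,
 *   - "b/bl <addr>"  if @addr is within the +/-128MB direct-branch range,
 *   - "b/bl <plt>"   otherwise, with the plt performing the far jump.
 * E.g. a trampoline 200MB away from @ip is out of B/BL range, so the branch
 * is routed through the plt.
 */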
/* Replace the branch instruction from @ip to @old_addr in a bpf prog or a bpf
 * trampoline with the branch instruction from @ip to @new_addr. If @old_addr
 * or @new_addr is NULL, the old or new instruction is NOP.
 *
 * When @ip is the bpf prog entry, a bpf trampoline is being attached or
 * detached. Since bpf trampoline and bpf prog are allocated separately with
 * vmalloc, the address distance may exceed 128MB, the maximum branch range.
 * So long jumps must be handled.
 *
 * When a bpf prog is constructed, a plt pointing to the empty trampoline
 * dummy_tramp is placed at the end:
 *
 *      bpf_prog:
 *              mov x9, lr
 *              nop // patchsite
 *              ...
 *              ret
 *
 *      plt:
 *              ldr x10, target
 *              br x10
 *      target:
 *              .quad dummy_tramp // plt target
 *
 * This is also the state when no trampoline is attached.
 *
 * When a short-jump bpf trampoline is attached, the patchsite is patched to
 * a bl instruction to the trampoline directly:
 *
 *      bpf_prog:
 *              mov x9, lr
 *              bl <short-jump bpf trampoline address> // patchsite
 *              ...
 *              ret
 *
 *      plt:
 *              ldr x10, target
 *              br x10
 *      target:
 *              .quad dummy_tramp // plt target
 *
 * When a long-jump bpf trampoline is attached, the plt target is filled with
 * the trampoline address and the patchsite is patched to a bl instruction to
 * the plt:
 *
 *      bpf_prog:
 *              mov x9, lr
 *              bl plt // patchsite
 *              ...
 *              ret
 *
 *      plt:
 *              ldr x10, target
 *              br x10
 *      target:
 *              .quad <long-jump bpf trampoline address> // plt target
 *
 * The dummy_tramp is used to prevent another CPU from jumping to unknown
 * locations during the patching process, making the patching process easier.
 */
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
		       void *old_addr, void *new_addr)
{
	int ret;
	u32 old_insn;
	u32 new_insn;
	u32 replaced;
	struct bpf_plt *plt = NULL;
	unsigned long size = 0UL;
	unsigned long offset = ~0UL;
	enum aarch64_insn_branch_type branch_type;
	char namebuf[KSYM_NAME_LEN];
	void *image = NULL;
	u64 plt_target = 0ULL;
	bool poking_bpf_entry;

	if (!__bpf_address_lookup((unsigned long)ip, &size, &offset, namebuf))
		/* Only poking bpf text is supported. Since kernel function
		 * entry is set up by ftrace, we rely on ftrace to poke kernel
		 * functions.
		 */
		return -ENOTSUPP;

	image = ip - offset;
	/* zero offset means we're poking bpf prog entry */
	poking_bpf_entry = (offset == 0UL);

	/* bpf prog entry, find plt and the real patchsite */
	if (poking_bpf_entry) {
		/* the plt is located at the end of the bpf prog */
		plt = image + size - PLT_TARGET_OFFSET;

		/* skip to the nop instruction in bpf prog entry:
		 * bti c // if BTI enabled
		 * mov x9, x30
		 * nop
		 */
		ip = image + POKE_OFFSET * AARCH64_INSN_SIZE;
	}

	/* long jump is only possible at bpf prog entry */
	if (WARN_ON((is_long_jump(ip, new_addr) || is_long_jump(ip, old_addr)) &&
		    !poking_bpf_entry))
		return -EINVAL;

	if (poke_type == BPF_MOD_CALL)
		branch_type = AARCH64_INSN_BRANCH_LINK;
	else
		branch_type = AARCH64_INSN_BRANCH_NOLINK;

	if (gen_branch_or_nop(branch_type, ip, old_addr, plt, &old_insn) < 0)
		return -EFAULT;

	if (gen_branch_or_nop(branch_type, ip, new_addr, plt, &new_insn) < 0)
		return -EFAULT;

	if (is_long_jump(ip, new_addr))
		plt_target = (u64)new_addr;
	else if (is_long_jump(ip, old_addr))
		/* if the old target is a long jump and the new target is not,
		 * restore the plt target to dummy_tramp, so there is always a
		 * legal and harmless address stored in plt target, and we'll
		 * never jump from plt to an unknown place.
		 */
		plt_target = (u64)&dummy_tramp;

	if (plt_target) {
		/* non-zero plt_target indicates we're patching a bpf prog,
		 * which is read only.
		 */
		if (set_memory_rw(PAGE_MASK & ((uintptr_t)&plt->target), 1))
			return -EFAULT;
		WRITE_ONCE(plt->target, plt_target);
		set_memory_ro(PAGE_MASK & ((uintptr_t)&plt->target), 1);
		/* since plt target points to either the new trampoline
		 * or dummy_tramp, even if another CPU reads the old plt
		 * target value before fetching the bl instruction to plt,
		 * it will be brought back by dummy_tramp, so no barrier is
		 * required here.
		 */
	}

	/* if the old target and the new target are both long jumps, the
	 * patchsite already branches to the plt and no patching is required
	 */
	if (old_insn == new_insn)
		return 0;

	mutex_lock(&text_mutex);
	if (aarch64_insn_read(ip, &replaced)) {
		ret = -EFAULT;
		goto out;
	}

	if (replaced != old_insn) {
		ret = -EFAULT;
		goto out;
	}

	/* We call aarch64_insn_patch_text_nosync() to replace the instruction
	 * atomically, so no other CPUs will fetch a half-new and half-old
	 * instruction. But there is a chance that another CPU executes the
	 * old instruction after the patching operation finishes (e.g.,
	 * pipeline not flushed, or icache not synchronized yet).
	 *
	 * 1. when a new trampoline is attached, it is not a problem for
	 *    different CPUs to jump to different trampolines temporarily.
	 *
	 * 2. when an old trampoline is freed, we should wait for all other
	 *    CPUs to exit the trampoline and make sure the trampoline is no
	 *    longer reachable. Since bpf_tramp_image_put() already uses
	 *    percpu_ref and task-based rcu to do the sync, there is no need to
	 *    call the sync version here; see bpf_tramp_image_put() for details.
	 */
	ret = aarch64_insn_patch_text_nosync(ip, new_insn);
out:
	mutex_unlock(&text_mutex);

	return ret;
}

bool bpf_jit_supports_ptr_xchg(void)
{
	return true;
}

bool bpf_jit_supports_exceptions(void)
{
	/* We unwind through both kernel frames starting from within bpf_throw
	 * call and BPF frames. Therefore we require the FP unwinder to be
	 * enabled to walk kernel frames and reach BPF frames in the stack
	 * trace. The ARM64 kernel is always compiled with
	 * CONFIG_FRAME_POINTER=y.
	 */
	return true;
}

bool bpf_jit_supports_arena(void)
{
	return true;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct arm64_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
					   sizeof(jit_data->header->size));
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}
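
/*
 * Illustrative sketch only (hypothetical helper, not used by the JIT): how a
 * caller could use bpf_arch_text_poke() above to attach or detach a trampoline
 * at a bpf prog entry. Attaching turns the NOP patchsite into a bl (directly
 * or via the plt); detaching restores the NOP.
 */
static int __maybe_unused example_toggle_tramp(void *prog_entry, void *tramp,
					       bool attach)
{
	if (attach)
		/* old_addr == NULL: the patchsite currently holds a NOP */
		return bpf_arch_text_poke(prog_entry, BPF_MOD_CALL, NULL, tramp);

	/* new_addr == NULL: put the NOP back */
	return bpf_arch_text_poke(prog_entry, BPF_MOD_CALL, tramp, NULL);
}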