1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * BPF JIT compiler 4 * 5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) 6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 7 */ 8 #include <linux/netdevice.h> 9 #include <linux/filter.h> 10 #include <linux/if_vlan.h> 11 #include <linux/bpf.h> 12 #include <linux/memory.h> 13 #include <linux/sort.h> 14 #include <asm/extable.h> 15 #include <asm/ftrace.h> 16 #include <asm/set_memory.h> 17 #include <asm/nospec-branch.h> 18 #include <asm/text-patching.h> 19 #include <asm/unwind.h> 20 #include <asm/cfi.h> 21 22 static bool all_callee_regs_used[4] = {true, true, true, true}; 23 24 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) 25 { 26 if (len == 1) 27 *ptr = bytes; 28 else if (len == 2) 29 *(u16 *)ptr = bytes; 30 else { 31 *(u32 *)ptr = bytes; 32 barrier(); 33 } 34 return ptr + len; 35 } 36 37 #define EMIT(bytes, len) \ 38 do { prog = emit_code(prog, bytes, len); } while (0) 39 40 #define EMIT1(b1) EMIT(b1, 1) 41 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) 42 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) 43 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) 44 45 #define EMIT1_off32(b1, off) \ 46 do { EMIT1(b1); EMIT(off, 4); } while (0) 47 #define EMIT2_off32(b1, b2, off) \ 48 do { EMIT2(b1, b2); EMIT(off, 4); } while (0) 49 #define EMIT3_off32(b1, b2, b3, off) \ 50 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) 51 #define EMIT4_off32(b1, b2, b3, b4, off) \ 52 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) 53 54 #ifdef CONFIG_X86_KERNEL_IBT 55 #define EMIT_ENDBR() EMIT(gen_endbr(), 4) 56 #define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4) 57 #else 58 #define EMIT_ENDBR() 59 #define EMIT_ENDBR_POISON() 60 #endif 61 62 static bool is_imm8(int value) 63 { 64 return value <= 127 && value >= -128; 65 } 66 67 static bool is_simm32(s64 value) 68 { 69 return value == (s64)(s32)value; 70 } 71 72 static bool is_uimm32(u64 value) 73 { 74 return value == (u64)(u32)value; 75 } 76 77 /* mov dst, src */ 78 #define EMIT_mov(DST, SRC) \ 79 do { \ 80 if (DST != SRC) \ 81 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ 82 } while (0) 83 84 static int bpf_size_to_x86_bytes(int bpf_size) 85 { 86 if (bpf_size == BPF_W) 87 return 4; 88 else if (bpf_size == BPF_H) 89 return 2; 90 else if (bpf_size == BPF_B) 91 return 1; 92 else if (bpf_size == BPF_DW) 93 return 4; /* imm32 */ 94 else 95 return 0; 96 } 97 98 /* 99 * List of x86 cond jumps opcodes (. + s8) 100 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) 101 */ 102 #define X86_JB 0x72 103 #define X86_JAE 0x73 104 #define X86_JE 0x74 105 #define X86_JNE 0x75 106 #define X86_JBE 0x76 107 #define X86_JA 0x77 108 #define X86_JL 0x7C 109 #define X86_JGE 0x7D 110 #define X86_JLE 0x7E 111 #define X86_JG 0x7F 112 113 /* Pick a register outside of BPF range for JIT internal work */ 114 #define AUX_REG (MAX_BPF_JIT_REG + 1) 115 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2) 116 #define X86_REG_R12 (MAX_BPF_JIT_REG + 3) 117 118 /* 119 * The following table maps BPF registers to x86-64 registers. 120 * 121 * x86-64 register R12 is unused, since if used as base address 122 * register in load/store instructions, it always needs an 123 * extra byte of encoding and is callee saved. 124 * 125 * x86-64 register R9 is not used by BPF programs, but can be used by BPF 126 * trampoline. x86-64 register R10 is used for blinding (if enabled). 
127 */ 128 static const int reg2hex[] = { 129 [BPF_REG_0] = 0, /* RAX */ 130 [BPF_REG_1] = 7, /* RDI */ 131 [BPF_REG_2] = 6, /* RSI */ 132 [BPF_REG_3] = 2, /* RDX */ 133 [BPF_REG_4] = 1, /* RCX */ 134 [BPF_REG_5] = 0, /* R8 */ 135 [BPF_REG_6] = 3, /* RBX callee saved */ 136 [BPF_REG_7] = 5, /* R13 callee saved */ 137 [BPF_REG_8] = 6, /* R14 callee saved */ 138 [BPF_REG_9] = 7, /* R15 callee saved */ 139 [BPF_REG_FP] = 5, /* RBP readonly */ 140 [BPF_REG_AX] = 2, /* R10 temp register */ 141 [AUX_REG] = 3, /* R11 temp register */ 142 [X86_REG_R9] = 1, /* R9 register, 6th function argument */ 143 [X86_REG_R12] = 4, /* R12 callee saved */ 144 }; 145 146 static const int reg2pt_regs[] = { 147 [BPF_REG_0] = offsetof(struct pt_regs, ax), 148 [BPF_REG_1] = offsetof(struct pt_regs, di), 149 [BPF_REG_2] = offsetof(struct pt_regs, si), 150 [BPF_REG_3] = offsetof(struct pt_regs, dx), 151 [BPF_REG_4] = offsetof(struct pt_regs, cx), 152 [BPF_REG_5] = offsetof(struct pt_regs, r8), 153 [BPF_REG_6] = offsetof(struct pt_regs, bx), 154 [BPF_REG_7] = offsetof(struct pt_regs, r13), 155 [BPF_REG_8] = offsetof(struct pt_regs, r14), 156 [BPF_REG_9] = offsetof(struct pt_regs, r15), 157 }; 158 159 /* 160 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 161 * which need extra byte of encoding. 162 * rax,rcx,...,rbp have simpler encoding 163 */ 164 static bool is_ereg(u32 reg) 165 { 166 return (1 << reg) & (BIT(BPF_REG_5) | 167 BIT(AUX_REG) | 168 BIT(BPF_REG_7) | 169 BIT(BPF_REG_8) | 170 BIT(BPF_REG_9) | 171 BIT(X86_REG_R9) | 172 BIT(X86_REG_R12) | 173 BIT(BPF_REG_AX)); 174 } 175 176 /* 177 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 178 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte 179 * of encoding. al,cl,dl,bl have simpler encoding. 
180 */ 181 static bool is_ereg_8l(u32 reg) 182 { 183 return is_ereg(reg) || 184 (1 << reg) & (BIT(BPF_REG_1) | 185 BIT(BPF_REG_2) | 186 BIT(BPF_REG_FP)); 187 } 188 189 static bool is_axreg(u32 reg) 190 { 191 return reg == BPF_REG_0; 192 } 193 194 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ 195 static u8 add_1mod(u8 byte, u32 reg) 196 { 197 if (is_ereg(reg)) 198 byte |= 1; 199 return byte; 200 } 201 202 static u8 add_2mod(u8 byte, u32 r1, u32 r2) 203 { 204 if (is_ereg(r1)) 205 byte |= 1; 206 if (is_ereg(r2)) 207 byte |= 4; 208 return byte; 209 } 210 211 static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index) 212 { 213 if (is_ereg(r1)) 214 byte |= 1; 215 if (is_ereg(index)) 216 byte |= 2; 217 if (is_ereg(r2)) 218 byte |= 4; 219 return byte; 220 } 221 222 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */ 223 static u8 add_1reg(u8 byte, u32 dst_reg) 224 { 225 return byte + reg2hex[dst_reg]; 226 } 227 228 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ 229 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) 230 { 231 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); 232 } 233 234 /* Some 1-byte opcodes for binary ALU operations */ 235 static u8 simple_alu_opcodes[] = { 236 [BPF_ADD] = 0x01, 237 [BPF_SUB] = 0x29, 238 [BPF_AND] = 0x21, 239 [BPF_OR] = 0x09, 240 [BPF_XOR] = 0x31, 241 [BPF_LSH] = 0xE0, 242 [BPF_RSH] = 0xE8, 243 [BPF_ARSH] = 0xF8, 244 }; 245 246 static void jit_fill_hole(void *area, unsigned int size) 247 { 248 /* Fill whole space with INT3 instructions */ 249 memset(area, 0xcc, size); 250 } 251 252 int bpf_arch_text_invalidate(void *dst, size_t len) 253 { 254 return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len)); 255 } 256 257 struct jit_context { 258 int cleanup_addr; /* Epilogue code offset */ 259 260 /* 261 * Program specific offsets of labels in the code; these rely on the 262 * JIT doing at least 2 passes, recording the position on the first 263 * pass, only to generate the correct offset on the second pass. 
264 */ 265 int tail_call_direct_label; 266 int tail_call_indirect_label; 267 }; 268 269 /* Maximum number of bytes emitted while JITing one eBPF insn */ 270 #define BPF_MAX_INSN_SIZE 128 271 #define BPF_INSN_SAFETY 64 272 273 /* Number of bytes emit_patch() needs to generate instructions */ 274 #define X86_PATCH_SIZE 5 275 /* Number of bytes that will be skipped on tailcall */ 276 #define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE) 277 278 static void push_r12(u8 **pprog) 279 { 280 u8 *prog = *pprog; 281 282 EMIT2(0x41, 0x54); /* push r12 */ 283 *pprog = prog; 284 } 285 286 static void push_callee_regs(u8 **pprog, bool *callee_regs_used) 287 { 288 u8 *prog = *pprog; 289 290 if (callee_regs_used[0]) 291 EMIT1(0x53); /* push rbx */ 292 if (callee_regs_used[1]) 293 EMIT2(0x41, 0x55); /* push r13 */ 294 if (callee_regs_used[2]) 295 EMIT2(0x41, 0x56); /* push r14 */ 296 if (callee_regs_used[3]) 297 EMIT2(0x41, 0x57); /* push r15 */ 298 *pprog = prog; 299 } 300 301 static void pop_r12(u8 **pprog) 302 { 303 u8 *prog = *pprog; 304 305 EMIT2(0x41, 0x5C); /* pop r12 */ 306 *pprog = prog; 307 } 308 309 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) 310 { 311 u8 *prog = *pprog; 312 313 if (callee_regs_used[3]) 314 EMIT2(0x41, 0x5F); /* pop r15 */ 315 if (callee_regs_used[2]) 316 EMIT2(0x41, 0x5E); /* pop r14 */ 317 if (callee_regs_used[1]) 318 EMIT2(0x41, 0x5D); /* pop r13 */ 319 if (callee_regs_used[0]) 320 EMIT1(0x5B); /* pop rbx */ 321 *pprog = prog; 322 } 323 324 static void emit_nops(u8 **pprog, int len) 325 { 326 u8 *prog = *pprog; 327 int i, noplen; 328 329 while (len > 0) { 330 noplen = len; 331 332 if (noplen > ASM_NOP_MAX) 333 noplen = ASM_NOP_MAX; 334 335 for (i = 0; i < noplen; i++) 336 EMIT1(x86_nops[noplen][i]); 337 len -= noplen; 338 } 339 340 *pprog = prog; 341 } 342 343 /* 344 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT 345 * in arch/x86/kernel/alternative.c 346 */ 347 348 static void emit_fineibt(u8 **pprog, u32 hash) 349 { 350 u8 *prog = *pprog; 351 352 EMIT_ENDBR(); 353 EMIT3_off32(0x41, 0x81, 0xea, hash); /* subl $hash, %r10d */ 354 EMIT2(0x74, 0x07); /* jz.d8 +7 */ 355 EMIT2(0x0f, 0x0b); /* ud2 */ 356 EMIT1(0x90); /* nop */ 357 EMIT_ENDBR_POISON(); 358 359 *pprog = prog; 360 } 361 362 static void emit_kcfi(u8 **pprog, u32 hash) 363 { 364 u8 *prog = *pprog; 365 366 EMIT1_off32(0xb8, hash); /* movl $hash, %eax */ 367 #ifdef CONFIG_CALL_PADDING 368 EMIT1(0x90); 369 EMIT1(0x90); 370 EMIT1(0x90); 371 EMIT1(0x90); 372 EMIT1(0x90); 373 EMIT1(0x90); 374 EMIT1(0x90); 375 EMIT1(0x90); 376 EMIT1(0x90); 377 EMIT1(0x90); 378 EMIT1(0x90); 379 #endif 380 EMIT_ENDBR(); 381 382 *pprog = prog; 383 } 384 385 static void emit_cfi(u8 **pprog, u32 hash) 386 { 387 u8 *prog = *pprog; 388 389 switch (cfi_mode) { 390 case CFI_FINEIBT: 391 emit_fineibt(&prog, hash); 392 break; 393 394 case CFI_KCFI: 395 emit_kcfi(&prog, hash); 396 break; 397 398 default: 399 EMIT_ENDBR(); 400 break; 401 } 402 403 *pprog = prog; 404 } 405 406 static void emit_prologue_tail_call(u8 **pprog, bool is_subprog) 407 { 408 u8 *prog = *pprog; 409 410 if (!is_subprog) { 411 /* cmp rax, MAX_TAIL_CALL_CNT */ 412 EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT); 413 EMIT2(X86_JA, 6); /* ja 6 */ 414 /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT. 415 * case1: entry of main prog. 416 * case2: tail callee of main prog. 417 */ 418 EMIT1(0x50); /* push rax */ 419 /* Make rax as tail_call_cnt_ptr. 
*/ 420 EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */ 421 EMIT2(0xEB, 1); /* jmp 1 */ 422 /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT. 423 * case: tail callee of subprog. 424 */ 425 EMIT1(0x50); /* push rax */ 426 /* push tail_call_cnt_ptr */ 427 EMIT1(0x50); /* push rax */ 428 } else { /* is_subprog */ 429 /* rax is tail_call_cnt_ptr. */ 430 EMIT1(0x50); /* push rax */ 431 EMIT1(0x50); /* push rax */ 432 } 433 434 *pprog = prog; 435 } 436 437 /* 438 * Emit x86-64 prologue code for BPF program. 439 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes 440 * while jumping to another program 441 */ 442 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, 443 bool tail_call_reachable, bool is_subprog, 444 bool is_exception_cb) 445 { 446 u8 *prog = *pprog; 447 448 emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash); 449 /* BPF trampoline can be made to work without these nops, 450 * but let's waste 5 bytes for now and optimize later 451 */ 452 emit_nops(&prog, X86_PATCH_SIZE); 453 if (!ebpf_from_cbpf) { 454 if (tail_call_reachable && !is_subprog) 455 /* When it's the entry of the whole tailcall context, 456 * zeroing rax means initialising tail_call_cnt. 457 */ 458 EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */ 459 else 460 /* Keep the same instruction layout. */ 461 emit_nops(&prog, 3); /* nop3 */ 462 } 463 /* Exception callback receives FP as third parameter */ 464 if (is_exception_cb) { 465 EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */ 466 EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */ 467 /* The main frame must have exception_boundary as true, so we 468 * first restore those callee-saved regs from stack, before 469 * reusing the stack frame. 470 */ 471 pop_callee_regs(&prog, all_callee_regs_used); 472 pop_r12(&prog); 473 /* Reset the stack frame. */ 474 EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */ 475 } else { 476 EMIT1(0x55); /* push rbp */ 477 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 478 } 479 480 /* X86_TAIL_CALL_OFFSET is here */ 481 EMIT_ENDBR(); 482 483 /* sub rsp, rounded_stack_depth */ 484 if (stack_depth) 485 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); 486 if (tail_call_reachable) 487 emit_prologue_tail_call(&prog, is_subprog); 488 *pprog = prog; 489 } 490 491 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) 492 { 493 u8 *prog = *pprog; 494 s64 offset; 495 496 offset = func - (ip + X86_PATCH_SIZE); 497 if (!is_simm32(offset)) { 498 pr_err("Target call %p is out of range\n", func); 499 return -ERANGE; 500 } 501 EMIT1_off32(opcode, offset); 502 *pprog = prog; 503 return 0; 504 } 505 506 static int emit_call(u8 **pprog, void *func, void *ip) 507 { 508 return emit_patch(pprog, func, ip, 0xE8); 509 } 510 511 static int emit_rsb_call(u8 **pprog, void *func, void *ip) 512 { 513 OPTIMIZER_HIDE_VAR(func); 514 ip += x86_call_depth_emit_accounting(pprog, func, ip); 515 return emit_patch(pprog, func, ip, 0xE8); 516 } 517 518 static int emit_jump(u8 **pprog, void *func, void *ip) 519 { 520 return emit_patch(pprog, func, ip, 0xE9); 521 } 522 523 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 524 void *old_addr, void *new_addr) 525 { 526 const u8 *nop_insn = x86_nops[5]; 527 u8 old_insn[X86_PATCH_SIZE]; 528 u8 new_insn[X86_PATCH_SIZE]; 529 u8 *prog; 530 int ret; 531 532 memcpy(old_insn, nop_insn, X86_PATCH_SIZE); 533 if (old_addr) { 534 prog = old_insn; 535 ret = t == BPF_MOD_CALL ? 
536 emit_call(&prog, old_addr, ip) : 537 emit_jump(&prog, old_addr, ip); 538 if (ret) 539 return ret; 540 } 541 542 memcpy(new_insn, nop_insn, X86_PATCH_SIZE); 543 if (new_addr) { 544 prog = new_insn; 545 ret = t == BPF_MOD_CALL ? 546 emit_call(&prog, new_addr, ip) : 547 emit_jump(&prog, new_addr, ip); 548 if (ret) 549 return ret; 550 } 551 552 ret = -EBUSY; 553 mutex_lock(&text_mutex); 554 if (memcmp(ip, old_insn, X86_PATCH_SIZE)) 555 goto out; 556 ret = 1; 557 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { 558 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); 559 ret = 0; 560 } 561 out: 562 mutex_unlock(&text_mutex); 563 return ret; 564 } 565 566 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 567 void *old_addr, void *new_addr) 568 { 569 if (!is_kernel_text((long)ip) && 570 !is_bpf_text_address((long)ip)) 571 /* BPF poking in modules is not supported */ 572 return -EINVAL; 573 574 /* 575 * See emit_prologue(), for IBT builds the trampoline hook is preceded 576 * with an ENDBR instruction. 577 */ 578 if (is_endbr(*(u32 *)ip)) 579 ip += ENDBR_INSN_SIZE; 580 581 return __bpf_arch_text_poke(ip, t, old_addr, new_addr); 582 } 583 584 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) 585 586 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) 587 { 588 u8 *prog = *pprog; 589 590 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { 591 EMIT_LFENCE(); 592 EMIT2(0xFF, 0xE0 + reg); 593 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { 594 OPTIMIZER_HIDE_VAR(reg); 595 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) 596 emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip); 597 else 598 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); 599 } else { 600 EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */ 601 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS)) 602 EMIT1(0xCC); /* int3 */ 603 } 604 605 *pprog = prog; 606 } 607 608 static void emit_return(u8 **pprog, u8 *ip) 609 { 610 u8 *prog = *pprog; 611 612 if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { 613 emit_jump(&prog, x86_return_thunk, ip); 614 } else { 615 EMIT1(0xC3); /* ret */ 616 if (IS_ENABLED(CONFIG_MITIGATION_SLS)) 617 EMIT1(0xCC); /* int3 */ 618 } 619 620 *pprog = prog; 621 } 622 623 #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8)) 624 625 /* 626 * Generate the following code: 627 * 628 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... 
629 * if (index >= array->map.max_entries) 630 * goto out; 631 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) 632 * goto out; 633 * prog = array->ptrs[index]; 634 * if (prog == NULL) 635 * goto out; 636 * goto *(prog->bpf_func + prologue_size); 637 * out: 638 */ 639 static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, 640 u8 **pprog, bool *callee_regs_used, 641 u32 stack_depth, u8 *ip, 642 struct jit_context *ctx) 643 { 644 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); 645 u8 *prog = *pprog, *start = *pprog; 646 int offset; 647 648 /* 649 * rdi - pointer to ctx 650 * rsi - pointer to bpf_array 651 * rdx - index in bpf_array 652 */ 653 654 /* 655 * if (index >= array->map.max_entries) 656 * goto out; 657 */ 658 EMIT2(0x89, 0xD2); /* mov edx, edx */ 659 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ 660 offsetof(struct bpf_array, map.max_entries)); 661 662 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 663 EMIT2(X86_JBE, offset); /* jbe out */ 664 665 /* 666 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) 667 * goto out; 668 */ 669 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ 670 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ 671 672 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 673 EMIT2(X86_JAE, offset); /* jae out */ 674 675 /* prog = array->ptrs[index]; */ 676 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ 677 offsetof(struct bpf_array, ptrs)); 678 679 /* 680 * if (prog == NULL) 681 * goto out; 682 */ 683 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ 684 685 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 686 EMIT2(X86_JE, offset); /* je out */ 687 688 /* Inc tail_call_cnt if the slot is populated. */ 689 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ 690 691 if (bpf_prog->aux->exception_boundary) { 692 pop_callee_regs(&prog, all_callee_regs_used); 693 pop_r12(&prog); 694 } else { 695 pop_callee_regs(&prog, callee_regs_used); 696 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena)) 697 pop_r12(&prog); 698 } 699 700 /* Pop tail_call_cnt_ptr. */ 701 EMIT1(0x58); /* pop rax */ 702 /* Pop tail_call_cnt, if it's main prog. 703 * Pop tail_call_cnt_ptr, if it's subprog. 
704 */ 705 EMIT1(0x58); /* pop rax */ 706 if (stack_depth) 707 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ 708 round_up(stack_depth, 8)); 709 710 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */ 711 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */ 712 offsetof(struct bpf_prog, bpf_func)); 713 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */ 714 X86_TAIL_CALL_OFFSET); 715 /* 716 * Now we're ready to jump into next BPF program 717 * rdi == ctx (1st arg) 718 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET 719 */ 720 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); 721 722 /* out: */ 723 ctx->tail_call_indirect_label = prog - start; 724 *pprog = prog; 725 } 726 727 static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, 728 struct bpf_jit_poke_descriptor *poke, 729 u8 **pprog, u8 *ip, 730 bool *callee_regs_used, u32 stack_depth, 731 struct jit_context *ctx) 732 { 733 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); 734 u8 *prog = *pprog, *start = *pprog; 735 int offset; 736 737 /* 738 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) 739 * goto out; 740 */ 741 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ 742 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ 743 744 offset = ctx->tail_call_direct_label - (prog + 2 - start); 745 EMIT2(X86_JAE, offset); /* jae out */ 746 747 poke->tailcall_bypass = ip + (prog - start); 748 poke->adj_off = X86_TAIL_CALL_OFFSET; 749 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; 750 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; 751 752 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, 753 poke->tailcall_bypass); 754 755 /* Inc tail_call_cnt if the slot is populated. */ 756 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ 757 758 if (bpf_prog->aux->exception_boundary) { 759 pop_callee_regs(&prog, all_callee_regs_used); 760 pop_r12(&prog); 761 } else { 762 pop_callee_regs(&prog, callee_regs_used); 763 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena)) 764 pop_r12(&prog); 765 } 766 767 /* Pop tail_call_cnt_ptr. */ 768 EMIT1(0x58); /* pop rax */ 769 /* Pop tail_call_cnt, if it's main prog. 770 * Pop tail_call_cnt_ptr, if it's subprog. 
771 */ 772 EMIT1(0x58); /* pop rax */ 773 if (stack_depth) 774 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); 775 776 emit_nops(&prog, X86_PATCH_SIZE); 777 778 /* out: */ 779 ctx->tail_call_direct_label = prog - start; 780 781 *pprog = prog; 782 } 783 784 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) 785 { 786 struct bpf_jit_poke_descriptor *poke; 787 struct bpf_array *array; 788 struct bpf_prog *target; 789 int i, ret; 790 791 for (i = 0; i < prog->aux->size_poke_tab; i++) { 792 poke = &prog->aux->poke_tab[i]; 793 if (poke->aux && poke->aux != prog->aux) 794 continue; 795 796 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); 797 798 if (poke->reason != BPF_POKE_REASON_TAIL_CALL) 799 continue; 800 801 array = container_of(poke->tail_call.map, struct bpf_array, map); 802 mutex_lock(&array->aux->poke_mutex); 803 target = array->ptrs[poke->tail_call.key]; 804 if (target) { 805 ret = __bpf_arch_text_poke(poke->tailcall_target, 806 BPF_MOD_JUMP, NULL, 807 (u8 *)target->bpf_func + 808 poke->adj_off); 809 BUG_ON(ret < 0); 810 ret = __bpf_arch_text_poke(poke->tailcall_bypass, 811 BPF_MOD_JUMP, 812 (u8 *)poke->tailcall_target + 813 X86_PATCH_SIZE, NULL); 814 BUG_ON(ret < 0); 815 } 816 WRITE_ONCE(poke->tailcall_target_stable, true); 817 mutex_unlock(&array->aux->poke_mutex); 818 } 819 } 820 821 static void emit_mov_imm32(u8 **pprog, bool sign_propagate, 822 u32 dst_reg, const u32 imm32) 823 { 824 u8 *prog = *pprog; 825 u8 b1, b2, b3; 826 827 /* 828 * Optimization: if imm32 is positive, use 'mov %eax, imm32' 829 * (which zero-extends imm32) to save 2 bytes. 830 */ 831 if (sign_propagate && (s32)imm32 < 0) { 832 /* 'mov %rax, imm32' sign extends imm32 */ 833 b1 = add_1mod(0x48, dst_reg); 834 b2 = 0xC7; 835 b3 = 0xC0; 836 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); 837 goto done; 838 } 839 840 /* 841 * Optimization: if imm32 is zero, use 'xor %eax, %eax' 842 * to save 3 bytes. 843 */ 844 if (imm32 == 0) { 845 if (is_ereg(dst_reg)) 846 EMIT1(add_2mod(0x40, dst_reg, dst_reg)); 847 b2 = 0x31; /* xor */ 848 b3 = 0xC0; 849 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg)); 850 goto done; 851 } 852 853 /* mov %eax, imm32 */ 854 if (is_ereg(dst_reg)) 855 EMIT1(add_1mod(0x40, dst_reg)); 856 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); 857 done: 858 *pprog = prog; 859 } 860 861 static void emit_mov_imm64(u8 **pprog, u32 dst_reg, 862 const u32 imm32_hi, const u32 imm32_lo) 863 { 864 u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo; 865 u8 *prog = *pprog; 866 867 if (is_uimm32(imm64)) { 868 /* 869 * For emitting plain u32, where sign bit must not be 870 * propagated LLVM tends to load imm64 over mov32 871 * directly, so save couple of bytes by just doing 872 * 'mov %eax, imm32' instead. 
873 */ 874 emit_mov_imm32(&prog, false, dst_reg, imm32_lo); 875 } else if (is_simm32(imm64)) { 876 emit_mov_imm32(&prog, true, dst_reg, imm32_lo); 877 } else { 878 /* movabsq rax, imm64 */ 879 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); 880 EMIT(imm32_lo, 4); 881 EMIT(imm32_hi, 4); 882 } 883 884 *pprog = prog; 885 } 886 887 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) 888 { 889 u8 *prog = *pprog; 890 891 if (is64) { 892 /* mov dst, src */ 893 EMIT_mov(dst_reg, src_reg); 894 } else { 895 /* mov32 dst, src */ 896 if (is_ereg(dst_reg) || is_ereg(src_reg)) 897 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 898 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); 899 } 900 901 *pprog = prog; 902 } 903 904 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg, 905 u32 src_reg) 906 { 907 u8 *prog = *pprog; 908 909 if (is64) { 910 /* movs[b,w,l]q dst, src */ 911 if (num_bits == 8) 912 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe, 913 add_2reg(0xC0, src_reg, dst_reg)); 914 else if (num_bits == 16) 915 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf, 916 add_2reg(0xC0, src_reg, dst_reg)); 917 else if (num_bits == 32) 918 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63, 919 add_2reg(0xC0, src_reg, dst_reg)); 920 } else { 921 /* movs[b,w]l dst, src */ 922 if (num_bits == 8) { 923 EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe, 924 add_2reg(0xC0, src_reg, dst_reg)); 925 } else if (num_bits == 16) { 926 if (is_ereg(dst_reg) || is_ereg(src_reg)) 927 EMIT1(add_2mod(0x40, src_reg, dst_reg)); 928 EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf, 929 add_2reg(0xC0, src_reg, dst_reg)); 930 } 931 } 932 933 *pprog = prog; 934 } 935 936 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */ 937 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off) 938 { 939 u8 *prog = *pprog; 940 941 if (is_imm8(off)) { 942 /* 1-byte signed displacement. 
943 * 944 * If off == 0 we could skip this and save one extra byte, but 945 * special case of x86 R13 which always needs an offset is not 946 * worth the hassle 947 */ 948 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off); 949 } else { 950 /* 4-byte signed displacement */ 951 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off); 952 } 953 *pprog = prog; 954 } 955 956 static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off) 957 { 958 u8 *prog = *pprog; 959 960 if (is_imm8(off)) { 961 EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off); 962 } else { 963 EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off); 964 } 965 *pprog = prog; 966 } 967 968 /* 969 * Emit a REX byte if it will be necessary to address these registers 970 */ 971 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) 972 { 973 u8 *prog = *pprog; 974 975 if (is64) 976 EMIT1(add_2mod(0x48, dst_reg, src_reg)); 977 else if (is_ereg(dst_reg) || is_ereg(src_reg)) 978 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 979 *pprog = prog; 980 } 981 982 /* 983 * Similar version of maybe_emit_mod() for a single register 984 */ 985 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) 986 { 987 u8 *prog = *pprog; 988 989 if (is64) 990 EMIT1(add_1mod(0x48, reg)); 991 else if (is_ereg(reg)) 992 EMIT1(add_1mod(0x40, reg)); 993 *pprog = prog; 994 } 995 996 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 997 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 998 { 999 u8 *prog = *pprog; 1000 1001 switch (size) { 1002 case BPF_B: 1003 /* Emit 'movzx rax, byte ptr [rax + off]' */ 1004 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); 1005 break; 1006 case BPF_H: 1007 /* Emit 'movzx rax, word ptr [rax + off]' */ 1008 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); 1009 break; 1010 case BPF_W: 1011 /* Emit 'mov eax, dword ptr [rax+0x14]' */ 1012 if (is_ereg(dst_reg) || is_ereg(src_reg)) 1013 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); 1014 else 1015 EMIT1(0x8B); 1016 break; 1017 case BPF_DW: 1018 /* Emit 'mov rax, qword ptr [rax+0x14]' */ 1019 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); 1020 break; 1021 } 1022 emit_insn_suffix(&prog, src_reg, dst_reg, off); 1023 *pprog = prog; 1024 } 1025 1026 /* LDSX: dst_reg = *(s8*)(src_reg + off) */ 1027 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 1028 { 1029 u8 *prog = *pprog; 1030 1031 switch (size) { 1032 case BPF_B: 1033 /* Emit 'movsx rax, byte ptr [rax + off]' */ 1034 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE); 1035 break; 1036 case BPF_H: 1037 /* Emit 'movsx rax, word ptr [rax + off]' */ 1038 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF); 1039 break; 1040 case BPF_W: 1041 /* Emit 'movsx rax, dword ptr [rax+0x14]' */ 1042 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63); 1043 break; 1044 } 1045 emit_insn_suffix(&prog, src_reg, dst_reg, off); 1046 *pprog = prog; 1047 } 1048 1049 static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off) 1050 { 1051 u8 *prog = *pprog; 1052 1053 switch (size) { 1054 case BPF_B: 1055 /* movzx rax, byte ptr [rax + r12 + off] */ 1056 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6); 1057 break; 1058 case BPF_H: 1059 /* movzx rax, word ptr [rax + r12 + off] */ 1060 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7); 1061 break; 1062 case BPF_W: 1063 /* mov eax, dword ptr [rax + r12 + off] */ 1064 
EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B); 1065 break; 1066 case BPF_DW: 1067 /* mov rax, qword ptr [rax + r12 + off] */ 1068 EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B); 1069 break; 1070 } 1071 emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off); 1072 *pprog = prog; 1073 } 1074 1075 static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 1076 { 1077 emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off); 1078 } 1079 1080 /* STX: *(u8*)(dst_reg + off) = src_reg */ 1081 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 1082 { 1083 u8 *prog = *pprog; 1084 1085 switch (size) { 1086 case BPF_B: 1087 /* Emit 'mov byte ptr [rax + off], al' */ 1088 if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) 1089 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ 1090 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); 1091 else 1092 EMIT1(0x88); 1093 break; 1094 case BPF_H: 1095 if (is_ereg(dst_reg) || is_ereg(src_reg)) 1096 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); 1097 else 1098 EMIT2(0x66, 0x89); 1099 break; 1100 case BPF_W: 1101 if (is_ereg(dst_reg) || is_ereg(src_reg)) 1102 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); 1103 else 1104 EMIT1(0x89); 1105 break; 1106 case BPF_DW: 1107 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); 1108 break; 1109 } 1110 emit_insn_suffix(&prog, dst_reg, src_reg, off); 1111 *pprog = prog; 1112 } 1113 1114 /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */ 1115 static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off) 1116 { 1117 u8 *prog = *pprog; 1118 1119 switch (size) { 1120 case BPF_B: 1121 /* mov byte ptr [rax + r12 + off], al */ 1122 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88); 1123 break; 1124 case BPF_H: 1125 /* mov word ptr [rax + r12 + off], ax */ 1126 EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89); 1127 break; 1128 case BPF_W: 1129 /* mov dword ptr [rax + r12 + 1], eax */ 1130 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89); 1131 break; 1132 case BPF_DW: 1133 /* mov qword ptr [rax + r12 + 1], rax */ 1134 EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89); 1135 break; 1136 } 1137 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off); 1138 *pprog = prog; 1139 } 1140 1141 static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 1142 { 1143 emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off); 1144 } 1145 1146 /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */ 1147 static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm) 1148 { 1149 u8 *prog = *pprog; 1150 1151 switch (size) { 1152 case BPF_B: 1153 /* mov byte ptr [rax + r12 + off], imm8 */ 1154 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6); 1155 break; 1156 case BPF_H: 1157 /* mov word ptr [rax + r12 + off], imm16 */ 1158 EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7); 1159 break; 1160 case BPF_W: 1161 /* mov dword ptr [rax + r12 + 1], imm32 */ 1162 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7); 1163 break; 1164 case BPF_DW: 1165 /* mov qword ptr [rax + r12 + 1], imm32 */ 1166 EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7); 1167 break; 1168 } 1169 emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off); 1170 EMIT(imm, bpf_size_to_x86_bytes(size)); 1171 *pprog = prog; 1172 } 1173 1174 static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm) 1175 { 1176 emit_st_index(pprog, size, dst_reg, 
X86_REG_R12, off, imm); 1177 } 1178 1179 static int emit_atomic(u8 **pprog, u8 atomic_op, 1180 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size) 1181 { 1182 u8 *prog = *pprog; 1183 1184 EMIT1(0xF0); /* lock prefix */ 1185 1186 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW); 1187 1188 /* emit opcode */ 1189 switch (atomic_op) { 1190 case BPF_ADD: 1191 case BPF_AND: 1192 case BPF_OR: 1193 case BPF_XOR: 1194 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */ 1195 EMIT1(simple_alu_opcodes[atomic_op]); 1196 break; 1197 case BPF_ADD | BPF_FETCH: 1198 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */ 1199 EMIT2(0x0F, 0xC1); 1200 break; 1201 case BPF_XCHG: 1202 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ 1203 EMIT1(0x87); 1204 break; 1205 case BPF_CMPXCHG: 1206 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ 1207 EMIT2(0x0F, 0xB1); 1208 break; 1209 default: 1210 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); 1211 return -EFAULT; 1212 } 1213 1214 emit_insn_suffix(&prog, dst_reg, src_reg, off); 1215 1216 *pprog = prog; 1217 return 0; 1218 } 1219 1220 static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size, 1221 u32 dst_reg, u32 src_reg, u32 index_reg, int off) 1222 { 1223 u8 *prog = *pprog; 1224 1225 EMIT1(0xF0); /* lock prefix */ 1226 switch (size) { 1227 case BPF_W: 1228 EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg)); 1229 break; 1230 case BPF_DW: 1231 EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg)); 1232 break; 1233 default: 1234 pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n"); 1235 return -EFAULT; 1236 } 1237 1238 /* emit opcode */ 1239 switch (atomic_op) { 1240 case BPF_ADD: 1241 case BPF_AND: 1242 case BPF_OR: 1243 case BPF_XOR: 1244 /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */ 1245 EMIT1(simple_alu_opcodes[atomic_op]); 1246 break; 1247 case BPF_ADD | BPF_FETCH: 1248 /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */ 1249 EMIT2(0x0F, 0xC1); 1250 break; 1251 case BPF_XCHG: 1252 /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */ 1253 EMIT1(0x87); 1254 break; 1255 case BPF_CMPXCHG: 1256 /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */ 1257 EMIT2(0x0F, 0xB1); 1258 break; 1259 default: 1260 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); 1261 return -EFAULT; 1262 } 1263 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off); 1264 *pprog = prog; 1265 return 0; 1266 } 1267 1268 #define DONT_CLEAR 1 1269 1270 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) 1271 { 1272 u32 reg = x->fixup >> 8; 1273 1274 /* jump over faulting load and clear dest register */ 1275 if (reg != DONT_CLEAR) 1276 *(unsigned long *)((void *)regs + reg) = 0; 1277 regs->ip += x->fixup & 0xff; 1278 return true; 1279 } 1280 1281 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, 1282 bool *regs_used) 1283 { 1284 int i; 1285 1286 for (i = 1; i <= insn_cnt; i++, insn++) { 1287 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) 1288 regs_used[0] = true; 1289 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) 1290 regs_used[1] = true; 1291 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) 1292 regs_used[2] = true; 1293 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) 1294 regs_used[3] = true; 1295 } 1296 } 1297 1298 /* emit the 3-byte VEX prefix 1299 * 1300 * r: same as rex.r, extra bit for ModRM reg field 1301 * x: same as rex.x, extra bit for SIB index field 1302 * b: same as rex.b, 
extra bit for ModRM r/m, or SIB base 1303 * m: opcode map select, encoding escape bytes e.g. 0x0f38 1304 * w: same as rex.w (32 bit or 64 bit) or opcode specific 1305 * src_reg2: additional source reg (encoded as BPF reg) 1306 * l: vector length (128 bit or 256 bit) or reserved 1307 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3) 1308 */ 1309 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m, 1310 bool w, u8 src_reg2, bool l, u8 pp) 1311 { 1312 u8 *prog = *pprog; 1313 const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */ 1314 u8 b1, b2; 1315 u8 vvvv = reg2hex[src_reg2]; 1316 1317 /* reg2hex gives only the lower 3 bit of vvvv */ 1318 if (is_ereg(src_reg2)) 1319 vvvv |= 1 << 3; 1320 1321 /* 1322 * 2nd byte of 3-byte VEX prefix 1323 * ~ means bit inverted encoding 1324 * 1325 * 7 0 1326 * +---+---+---+---+---+---+---+---+ 1327 * |~R |~X |~B | m | 1328 * +---+---+---+---+---+---+---+---+ 1329 */ 1330 b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f); 1331 /* 1332 * 3rd byte of 3-byte VEX prefix 1333 * 1334 * 7 0 1335 * +---+---+---+---+---+---+---+---+ 1336 * | W | ~vvvv | L | pp | 1337 * +---+---+---+---+---+---+---+---+ 1338 */ 1339 b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3); 1340 1341 EMIT3(b0, b1, b2); 1342 *pprog = prog; 1343 } 1344 1345 /* emit BMI2 shift instruction */ 1346 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) 1347 { 1348 u8 *prog = *pprog; 1349 bool r = is_ereg(dst_reg); 1350 u8 m = 2; /* escape code 0f38 */ 1351 1352 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op); 1353 EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg)); 1354 *pprog = prog; 1355 } 1356 1357 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) 1358 1359 #define __LOAD_TCC_PTR(off) \ 1360 EMIT3_off32(0x48, 0x8B, 0x85, off) 1361 /* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */ 1362 #define LOAD_TAIL_CALL_CNT_PTR(stack) \ 1363 __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)) 1364 1365 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, 1366 int oldproglen, struct jit_context *ctx, bool jmp_padding) 1367 { 1368 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; 1369 struct bpf_insn *insn = bpf_prog->insnsi; 1370 bool callee_regs_used[4] = {}; 1371 int insn_cnt = bpf_prog->len; 1372 bool seen_exit = false; 1373 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; 1374 u64 arena_vm_start, user_vm_start; 1375 int i, excnt = 0; 1376 int ilen, proglen = 0; 1377 u8 *prog = temp; 1378 int err; 1379 1380 arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena); 1381 user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena); 1382 1383 detect_reg_usage(insn, insn_cnt, callee_regs_used); 1384 1385 emit_prologue(&prog, bpf_prog->aux->stack_depth, 1386 bpf_prog_was_classic(bpf_prog), tail_call_reachable, 1387 bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb); 1388 /* Exception callback will clobber callee regs for its own use, and 1389 * restore the original callee regs from main prog's stack frame. 1390 */ 1391 if (bpf_prog->aux->exception_boundary) { 1392 /* We also need to save r12, which is not mapped to any BPF 1393 * register, as we throw after entry into the kernel, which may 1394 * overwrite r12. 
1395 */ 1396 push_r12(&prog); 1397 push_callee_regs(&prog, all_callee_regs_used); 1398 } else { 1399 if (arena_vm_start) 1400 push_r12(&prog); 1401 push_callee_regs(&prog, callee_regs_used); 1402 } 1403 if (arena_vm_start) 1404 emit_mov_imm64(&prog, X86_REG_R12, 1405 arena_vm_start >> 32, (u32) arena_vm_start); 1406 1407 ilen = prog - temp; 1408 if (rw_image) 1409 memcpy(rw_image + proglen, temp, ilen); 1410 proglen += ilen; 1411 addrs[0] = proglen; 1412 prog = temp; 1413 1414 for (i = 1; i <= insn_cnt; i++, insn++) { 1415 const s32 imm32 = insn->imm; 1416 u32 dst_reg = insn->dst_reg; 1417 u32 src_reg = insn->src_reg; 1418 u8 b2 = 0, b3 = 0; 1419 u8 *start_of_ldx; 1420 s64 jmp_offset; 1421 s16 insn_off; 1422 u8 jmp_cond; 1423 u8 *func; 1424 int nops; 1425 1426 switch (insn->code) { 1427 /* ALU */ 1428 case BPF_ALU | BPF_ADD | BPF_X: 1429 case BPF_ALU | BPF_SUB | BPF_X: 1430 case BPF_ALU | BPF_AND | BPF_X: 1431 case BPF_ALU | BPF_OR | BPF_X: 1432 case BPF_ALU | BPF_XOR | BPF_X: 1433 case BPF_ALU64 | BPF_ADD | BPF_X: 1434 case BPF_ALU64 | BPF_SUB | BPF_X: 1435 case BPF_ALU64 | BPF_AND | BPF_X: 1436 case BPF_ALU64 | BPF_OR | BPF_X: 1437 case BPF_ALU64 | BPF_XOR | BPF_X: 1438 maybe_emit_mod(&prog, dst_reg, src_reg, 1439 BPF_CLASS(insn->code) == BPF_ALU64); 1440 b2 = simple_alu_opcodes[BPF_OP(insn->code)]; 1441 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); 1442 break; 1443 1444 case BPF_ALU64 | BPF_MOV | BPF_X: 1445 if (insn_is_cast_user(insn)) { 1446 if (dst_reg != src_reg) 1447 /* 32-bit mov */ 1448 emit_mov_reg(&prog, false, dst_reg, src_reg); 1449 /* shl dst_reg, 32 */ 1450 maybe_emit_1mod(&prog, dst_reg, true); 1451 EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32); 1452 1453 /* or dst_reg, user_vm_start */ 1454 maybe_emit_1mod(&prog, dst_reg, true); 1455 if (is_axreg(dst_reg)) 1456 EMIT1_off32(0x0D, user_vm_start >> 32); 1457 else 1458 EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32); 1459 1460 /* rol dst_reg, 32 */ 1461 maybe_emit_1mod(&prog, dst_reg, true); 1462 EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32); 1463 1464 /* xor r11, r11 */ 1465 EMIT3(0x4D, 0x31, 0xDB); 1466 1467 /* test dst_reg32, dst_reg32; check if lower 32-bit are zero */ 1468 maybe_emit_mod(&prog, dst_reg, dst_reg, false); 1469 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 1470 1471 /* cmove r11, dst_reg; if so, set dst_reg to zero */ 1472 /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! 
*/ 1473 maybe_emit_mod(&prog, AUX_REG, dst_reg, true); 1474 EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg)); 1475 break; 1476 } else if (insn_is_mov_percpu_addr(insn)) { 1477 /* mov <dst>, <src> (if necessary) */ 1478 EMIT_mov(dst_reg, src_reg); 1479 #ifdef CONFIG_SMP 1480 /* add <dst>, gs:[<off>] */ 1481 EMIT2(0x65, add_1mod(0x48, dst_reg)); 1482 EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25); 1483 EMIT((u32)(unsigned long)&this_cpu_off, 4); 1484 #endif 1485 break; 1486 } 1487 fallthrough; 1488 case BPF_ALU | BPF_MOV | BPF_X: 1489 if (insn->off == 0) 1490 emit_mov_reg(&prog, 1491 BPF_CLASS(insn->code) == BPF_ALU64, 1492 dst_reg, src_reg); 1493 else 1494 emit_movsx_reg(&prog, insn->off, 1495 BPF_CLASS(insn->code) == BPF_ALU64, 1496 dst_reg, src_reg); 1497 break; 1498 1499 /* neg dst */ 1500 case BPF_ALU | BPF_NEG: 1501 case BPF_ALU64 | BPF_NEG: 1502 maybe_emit_1mod(&prog, dst_reg, 1503 BPF_CLASS(insn->code) == BPF_ALU64); 1504 EMIT2(0xF7, add_1reg(0xD8, dst_reg)); 1505 break; 1506 1507 case BPF_ALU | BPF_ADD | BPF_K: 1508 case BPF_ALU | BPF_SUB | BPF_K: 1509 case BPF_ALU | BPF_AND | BPF_K: 1510 case BPF_ALU | BPF_OR | BPF_K: 1511 case BPF_ALU | BPF_XOR | BPF_K: 1512 case BPF_ALU64 | BPF_ADD | BPF_K: 1513 case BPF_ALU64 | BPF_SUB | BPF_K: 1514 case BPF_ALU64 | BPF_AND | BPF_K: 1515 case BPF_ALU64 | BPF_OR | BPF_K: 1516 case BPF_ALU64 | BPF_XOR | BPF_K: 1517 maybe_emit_1mod(&prog, dst_reg, 1518 BPF_CLASS(insn->code) == BPF_ALU64); 1519 1520 /* 1521 * b3 holds 'normal' opcode, b2 short form only valid 1522 * in case dst is eax/rax. 1523 */ 1524 switch (BPF_OP(insn->code)) { 1525 case BPF_ADD: 1526 b3 = 0xC0; 1527 b2 = 0x05; 1528 break; 1529 case BPF_SUB: 1530 b3 = 0xE8; 1531 b2 = 0x2D; 1532 break; 1533 case BPF_AND: 1534 b3 = 0xE0; 1535 b2 = 0x25; 1536 break; 1537 case BPF_OR: 1538 b3 = 0xC8; 1539 b2 = 0x0D; 1540 break; 1541 case BPF_XOR: 1542 b3 = 0xF0; 1543 b2 = 0x35; 1544 break; 1545 } 1546 1547 if (is_imm8(imm32)) 1548 EMIT3(0x83, add_1reg(b3, dst_reg), imm32); 1549 else if (is_axreg(dst_reg)) 1550 EMIT1_off32(b2, imm32); 1551 else 1552 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); 1553 break; 1554 1555 case BPF_ALU64 | BPF_MOV | BPF_K: 1556 case BPF_ALU | BPF_MOV | BPF_K: 1557 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, 1558 dst_reg, imm32); 1559 break; 1560 1561 case BPF_LD | BPF_IMM | BPF_DW: 1562 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); 1563 insn++; 1564 i++; 1565 break; 1566 1567 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ 1568 case BPF_ALU | BPF_MOD | BPF_X: 1569 case BPF_ALU | BPF_DIV | BPF_X: 1570 case BPF_ALU | BPF_MOD | BPF_K: 1571 case BPF_ALU | BPF_DIV | BPF_K: 1572 case BPF_ALU64 | BPF_MOD | BPF_X: 1573 case BPF_ALU64 | BPF_DIV | BPF_X: 1574 case BPF_ALU64 | BPF_MOD | BPF_K: 1575 case BPF_ALU64 | BPF_DIV | BPF_K: { 1576 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 1577 1578 if (dst_reg != BPF_REG_0) 1579 EMIT1(0x50); /* push rax */ 1580 if (dst_reg != BPF_REG_3) 1581 EMIT1(0x52); /* push rdx */ 1582 1583 if (BPF_SRC(insn->code) == BPF_X) { 1584 if (src_reg == BPF_REG_0 || 1585 src_reg == BPF_REG_3) { 1586 /* mov r11, src_reg */ 1587 EMIT_mov(AUX_REG, src_reg); 1588 src_reg = AUX_REG; 1589 } 1590 } else { 1591 /* mov r11, imm32 */ 1592 EMIT3_off32(0x49, 0xC7, 0xC3, imm32); 1593 src_reg = AUX_REG; 1594 } 1595 1596 if (dst_reg != BPF_REG_0) 1597 /* mov rax, dst_reg */ 1598 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); 1599 1600 if (insn->off == 0) { 1601 /* 1602 * xor edx, edx 1603 * equivalent to 'xor rdx, rdx', but one byte 
less 1604 */ 1605 EMIT2(0x31, 0xd2); 1606 1607 /* div src_reg */ 1608 maybe_emit_1mod(&prog, src_reg, is64); 1609 EMIT2(0xF7, add_1reg(0xF0, src_reg)); 1610 } else { 1611 if (BPF_CLASS(insn->code) == BPF_ALU) 1612 EMIT1(0x99); /* cdq */ 1613 else 1614 EMIT2(0x48, 0x99); /* cqo */ 1615 1616 /* idiv src_reg */ 1617 maybe_emit_1mod(&prog, src_reg, is64); 1618 EMIT2(0xF7, add_1reg(0xF8, src_reg)); 1619 } 1620 1621 if (BPF_OP(insn->code) == BPF_MOD && 1622 dst_reg != BPF_REG_3) 1623 /* mov dst_reg, rdx */ 1624 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); 1625 else if (BPF_OP(insn->code) == BPF_DIV && 1626 dst_reg != BPF_REG_0) 1627 /* mov dst_reg, rax */ 1628 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); 1629 1630 if (dst_reg != BPF_REG_3) 1631 EMIT1(0x5A); /* pop rdx */ 1632 if (dst_reg != BPF_REG_0) 1633 EMIT1(0x58); /* pop rax */ 1634 break; 1635 } 1636 1637 case BPF_ALU | BPF_MUL | BPF_K: 1638 case BPF_ALU64 | BPF_MUL | BPF_K: 1639 maybe_emit_mod(&prog, dst_reg, dst_reg, 1640 BPF_CLASS(insn->code) == BPF_ALU64); 1641 1642 if (is_imm8(imm32)) 1643 /* imul dst_reg, dst_reg, imm8 */ 1644 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), 1645 imm32); 1646 else 1647 /* imul dst_reg, dst_reg, imm32 */ 1648 EMIT2_off32(0x69, 1649 add_2reg(0xC0, dst_reg, dst_reg), 1650 imm32); 1651 break; 1652 1653 case BPF_ALU | BPF_MUL | BPF_X: 1654 case BPF_ALU64 | BPF_MUL | BPF_X: 1655 maybe_emit_mod(&prog, src_reg, dst_reg, 1656 BPF_CLASS(insn->code) == BPF_ALU64); 1657 1658 /* imul dst_reg, src_reg */ 1659 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); 1660 break; 1661 1662 /* Shifts */ 1663 case BPF_ALU | BPF_LSH | BPF_K: 1664 case BPF_ALU | BPF_RSH | BPF_K: 1665 case BPF_ALU | BPF_ARSH | BPF_K: 1666 case BPF_ALU64 | BPF_LSH | BPF_K: 1667 case BPF_ALU64 | BPF_RSH | BPF_K: 1668 case BPF_ALU64 | BPF_ARSH | BPF_K: 1669 maybe_emit_1mod(&prog, dst_reg, 1670 BPF_CLASS(insn->code) == BPF_ALU64); 1671 1672 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1673 if (imm32 == 1) 1674 EMIT2(0xD1, add_1reg(b3, dst_reg)); 1675 else 1676 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); 1677 break; 1678 1679 case BPF_ALU | BPF_LSH | BPF_X: 1680 case BPF_ALU | BPF_RSH | BPF_X: 1681 case BPF_ALU | BPF_ARSH | BPF_X: 1682 case BPF_ALU64 | BPF_LSH | BPF_X: 1683 case BPF_ALU64 | BPF_RSH | BPF_X: 1684 case BPF_ALU64 | BPF_ARSH | BPF_X: 1685 /* BMI2 shifts aren't better when shift count is already in rcx */ 1686 if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) { 1687 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */ 1688 bool w = (BPF_CLASS(insn->code) == BPF_ALU64); 1689 u8 op; 1690 1691 switch (BPF_OP(insn->code)) { 1692 case BPF_LSH: 1693 op = 1; /* prefix 0x66 */ 1694 break; 1695 case BPF_RSH: 1696 op = 3; /* prefix 0xf2 */ 1697 break; 1698 case BPF_ARSH: 1699 op = 2; /* prefix 0xf3 */ 1700 break; 1701 } 1702 1703 emit_shiftx(&prog, dst_reg, src_reg, w, op); 1704 1705 break; 1706 } 1707 1708 if (src_reg != BPF_REG_4) { /* common case */ 1709 /* Check for bad case when dst_reg == rcx */ 1710 if (dst_reg == BPF_REG_4) { 1711 /* mov r11, dst_reg */ 1712 EMIT_mov(AUX_REG, dst_reg); 1713 dst_reg = AUX_REG; 1714 } else { 1715 EMIT1(0x51); /* push rcx */ 1716 } 1717 /* mov rcx, src_reg */ 1718 EMIT_mov(BPF_REG_4, src_reg); 1719 } 1720 1721 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ 1722 maybe_emit_1mod(&prog, dst_reg, 1723 BPF_CLASS(insn->code) == BPF_ALU64); 1724 1725 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1726 EMIT2(0xD3, add_1reg(b3, dst_reg)); 1727 1728 if (src_reg != BPF_REG_4) { 1729 if (insn->dst_reg == 
BPF_REG_4) 1730 /* mov dst_reg, r11 */ 1731 EMIT_mov(insn->dst_reg, AUX_REG); 1732 else 1733 EMIT1(0x59); /* pop rcx */ 1734 } 1735 1736 break; 1737 1738 case BPF_ALU | BPF_END | BPF_FROM_BE: 1739 case BPF_ALU64 | BPF_END | BPF_FROM_LE: 1740 switch (imm32) { 1741 case 16: 1742 /* Emit 'ror %ax, 8' to swap lower 2 bytes */ 1743 EMIT1(0x66); 1744 if (is_ereg(dst_reg)) 1745 EMIT1(0x41); 1746 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 1747 1748 /* Emit 'movzwl eax, ax' */ 1749 if (is_ereg(dst_reg)) 1750 EMIT3(0x45, 0x0F, 0xB7); 1751 else 1752 EMIT2(0x0F, 0xB7); 1753 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1754 break; 1755 case 32: 1756 /* Emit 'bswap eax' to swap lower 4 bytes */ 1757 if (is_ereg(dst_reg)) 1758 EMIT2(0x41, 0x0F); 1759 else 1760 EMIT1(0x0F); 1761 EMIT1(add_1reg(0xC8, dst_reg)); 1762 break; 1763 case 64: 1764 /* Emit 'bswap rax' to swap 8 bytes */ 1765 EMIT3(add_1mod(0x48, dst_reg), 0x0F, 1766 add_1reg(0xC8, dst_reg)); 1767 break; 1768 } 1769 break; 1770 1771 case BPF_ALU | BPF_END | BPF_FROM_LE: 1772 switch (imm32) { 1773 case 16: 1774 /* 1775 * Emit 'movzwl eax, ax' to zero extend 16-bit 1776 * into 64 bit 1777 */ 1778 if (is_ereg(dst_reg)) 1779 EMIT3(0x45, 0x0F, 0xB7); 1780 else 1781 EMIT2(0x0F, 0xB7); 1782 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1783 break; 1784 case 32: 1785 /* Emit 'mov eax, eax' to clear upper 32-bits */ 1786 if (is_ereg(dst_reg)) 1787 EMIT1(0x45); 1788 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); 1789 break; 1790 case 64: 1791 /* nop */ 1792 break; 1793 } 1794 break; 1795 1796 /* speculation barrier */ 1797 case BPF_ST | BPF_NOSPEC: 1798 EMIT_LFENCE(); 1799 break; 1800 1801 /* ST: *(u8*)(dst_reg + off) = imm */ 1802 case BPF_ST | BPF_MEM | BPF_B: 1803 if (is_ereg(dst_reg)) 1804 EMIT2(0x41, 0xC6); 1805 else 1806 EMIT1(0xC6); 1807 goto st; 1808 case BPF_ST | BPF_MEM | BPF_H: 1809 if (is_ereg(dst_reg)) 1810 EMIT3(0x66, 0x41, 0xC7); 1811 else 1812 EMIT2(0x66, 0xC7); 1813 goto st; 1814 case BPF_ST | BPF_MEM | BPF_W: 1815 if (is_ereg(dst_reg)) 1816 EMIT2(0x41, 0xC7); 1817 else 1818 EMIT1(0xC7); 1819 goto st; 1820 case BPF_ST | BPF_MEM | BPF_DW: 1821 EMIT2(add_1mod(0x48, dst_reg), 0xC7); 1822 1823 st: if (is_imm8(insn->off)) 1824 EMIT2(add_1reg(0x40, dst_reg), insn->off); 1825 else 1826 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); 1827 1828 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); 1829 break; 1830 1831 /* STX: *(u8*)(dst_reg + off) = src_reg */ 1832 case BPF_STX | BPF_MEM | BPF_B: 1833 case BPF_STX | BPF_MEM | BPF_H: 1834 case BPF_STX | BPF_MEM | BPF_W: 1835 case BPF_STX | BPF_MEM | BPF_DW: 1836 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1837 break; 1838 1839 case BPF_ST | BPF_PROBE_MEM32 | BPF_B: 1840 case BPF_ST | BPF_PROBE_MEM32 | BPF_H: 1841 case BPF_ST | BPF_PROBE_MEM32 | BPF_W: 1842 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: 1843 start_of_ldx = prog; 1844 emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm); 1845 goto populate_extable; 1846 1847 /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */ 1848 case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: 1849 case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: 1850 case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: 1851 case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: 1852 case BPF_STX | BPF_PROBE_MEM32 | BPF_B: 1853 case BPF_STX | BPF_PROBE_MEM32 | BPF_H: 1854 case BPF_STX | BPF_PROBE_MEM32 | BPF_W: 1855 case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: 1856 start_of_ldx = prog; 1857 if (BPF_CLASS(insn->code) == BPF_LDX) 1858 emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 
1859 else 1860 emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1861 populate_extable: 1862 { 1863 struct exception_table_entry *ex; 1864 u8 *_insn = image + proglen + (start_of_ldx - temp); 1865 s64 delta; 1866 1867 if (!bpf_prog->aux->extable) 1868 break; 1869 1870 if (excnt >= bpf_prog->aux->num_exentries) { 1871 pr_err("mem32 extable bug\n"); 1872 return -EFAULT; 1873 } 1874 ex = &bpf_prog->aux->extable[excnt++]; 1875 1876 delta = _insn - (u8 *)&ex->insn; 1877 /* switch ex to rw buffer for writes */ 1878 ex = (void *)rw_image + ((void *)ex - (void *)image); 1879 1880 ex->insn = delta; 1881 1882 ex->data = EX_TYPE_BPF; 1883 1884 ex->fixup = (prog - start_of_ldx) | 1885 ((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8); 1886 } 1887 break; 1888 1889 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 1890 case BPF_LDX | BPF_MEM | BPF_B: 1891 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1892 case BPF_LDX | BPF_MEM | BPF_H: 1893 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1894 case BPF_LDX | BPF_MEM | BPF_W: 1895 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1896 case BPF_LDX | BPF_MEM | BPF_DW: 1897 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1898 /* LDXS: dst_reg = *(s8*)(src_reg + off) */ 1899 case BPF_LDX | BPF_MEMSX | BPF_B: 1900 case BPF_LDX | BPF_MEMSX | BPF_H: 1901 case BPF_LDX | BPF_MEMSX | BPF_W: 1902 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: 1903 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: 1904 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: 1905 insn_off = insn->off; 1906 1907 if (BPF_MODE(insn->code) == BPF_PROBE_MEM || 1908 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { 1909 /* Conservatively check that src_reg + insn->off is a kernel address: 1910 * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE 1911 * and 1912 * src_reg + insn->off < VSYSCALL_ADDR 1913 */ 1914 1915 u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR; 1916 u8 *end_of_jmp; 1917 1918 /* movabsq r10, VSYSCALL_ADDR */ 1919 emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32, 1920 (u32)(long)VSYSCALL_ADDR); 1921 1922 /* mov src_reg, r11 */ 1923 EMIT_mov(AUX_REG, src_reg); 1924 1925 if (insn->off) { 1926 /* add r11, insn->off */ 1927 maybe_emit_1mod(&prog, AUX_REG, true); 1928 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); 1929 } 1930 1931 /* sub r11, r10 */ 1932 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); 1933 EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); 1934 1935 /* movabsq r10, limit */ 1936 emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32, 1937 (u32)(long)limit); 1938 1939 /* cmp r10, r11 */ 1940 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); 1941 EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); 1942 1943 /* if unsigned '>', goto load */ 1944 EMIT2(X86_JA, 0); 1945 end_of_jmp = prog; 1946 1947 /* xor dst_reg, dst_reg */ 1948 emit_mov_imm32(&prog, false, dst_reg, 0); 1949 /* jmp byte_after_ldx */ 1950 EMIT2(0xEB, 0); 1951 1952 /* populate jmp_offset for JAE above to jump to start_of_ldx */ 1953 start_of_ldx = prog; 1954 end_of_jmp[-1] = start_of_ldx - end_of_jmp; 1955 } 1956 if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX || 1957 BPF_MODE(insn->code) == BPF_MEMSX) 1958 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); 1959 else 1960 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); 1961 if (BPF_MODE(insn->code) == BPF_PROBE_MEM || 1962 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { 1963 struct exception_table_entry *ex; 1964 u8 *_insn = image + proglen + (start_of_ldx - temp); 1965 s64 delta; 1966 1967 /* populate jmp_offset for JMP above */ 1968 
start_of_ldx[-1] = prog - start_of_ldx; 1969 1970 if (!bpf_prog->aux->extable) 1971 break; 1972 1973 if (excnt >= bpf_prog->aux->num_exentries) { 1974 pr_err("ex gen bug\n"); 1975 return -EFAULT; 1976 } 1977 ex = &bpf_prog->aux->extable[excnt++]; 1978 1979 delta = _insn - (u8 *)&ex->insn; 1980 if (!is_simm32(delta)) { 1981 pr_err("extable->insn doesn't fit into 32-bit\n"); 1982 return -EFAULT; 1983 } 1984 /* switch ex to rw buffer for writes */ 1985 ex = (void *)rw_image + ((void *)ex - (void *)image); 1986 1987 ex->insn = delta; 1988 1989 ex->data = EX_TYPE_BPF; 1990 1991 if (dst_reg > BPF_REG_9) { 1992 pr_err("verifier error\n"); 1993 return -EFAULT; 1994 } 1995 /* 1996 * Compute size of x86 insn and its target dest x86 register. 1997 * ex_handler_bpf() will use lower 8 bits to adjust 1998 * pt_regs->ip to jump over this x86 instruction 1999 * and upper bits to figure out which pt_regs to zero out. 2000 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" 2001 * of 4 bytes will be ignored and rbx will be zero inited. 2002 */ 2003 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); 2004 } 2005 break; 2006 2007 case BPF_STX | BPF_ATOMIC | BPF_W: 2008 case BPF_STX | BPF_ATOMIC | BPF_DW: 2009 if (insn->imm == (BPF_AND | BPF_FETCH) || 2010 insn->imm == (BPF_OR | BPF_FETCH) || 2011 insn->imm == (BPF_XOR | BPF_FETCH)) { 2012 bool is64 = BPF_SIZE(insn->code) == BPF_DW; 2013 u32 real_src_reg = src_reg; 2014 u32 real_dst_reg = dst_reg; 2015 u8 *branch_target; 2016 2017 /* 2018 * Can't be implemented with a single x86 insn. 2019 * Need to do a CMPXCHG loop. 2020 */ 2021 2022 /* Will need RAX as a CMPXCHG operand so save R0 */ 2023 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 2024 if (src_reg == BPF_REG_0) 2025 real_src_reg = BPF_REG_AX; 2026 if (dst_reg == BPF_REG_0) 2027 real_dst_reg = BPF_REG_AX; 2028 2029 branch_target = prog; 2030 /* Load old value */ 2031 emit_ldx(&prog, BPF_SIZE(insn->code), 2032 BPF_REG_0, real_dst_reg, insn->off); 2033 /* 2034 * Perform the (commutative) operation locally, 2035 * put the result in the AUX_REG. 2036 */ 2037 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); 2038 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); 2039 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], 2040 add_2reg(0xC0, AUX_REG, real_src_reg)); 2041 /* Attempt to swap in new value */ 2042 err = emit_atomic(&prog, BPF_CMPXCHG, 2043 real_dst_reg, AUX_REG, 2044 insn->off, 2045 BPF_SIZE(insn->code)); 2046 if (WARN_ON(err)) 2047 return err; 2048 /* 2049 * ZF tells us whether we won the race. If it's 2050 * cleared we need to try again. 
2051 */ 2052 EMIT2(X86_JNE, -(prog - branch_target) - 2); 2053 /* Return the pre-modification value */ 2054 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); 2055 /* Restore R0 after clobbering RAX */ 2056 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 2057 break; 2058 } 2059 2060 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, 2061 insn->off, BPF_SIZE(insn->code)); 2062 if (err) 2063 return err; 2064 break; 2065 2066 case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: 2067 case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: 2068 start_of_ldx = prog; 2069 err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code), 2070 dst_reg, src_reg, X86_REG_R12, insn->off); 2071 if (err) 2072 return err; 2073 goto populate_extable; 2074 2075 /* call */ 2076 case BPF_JMP | BPF_CALL: { 2077 u8 *ip = image + addrs[i - 1]; 2078 2079 func = (u8 *) __bpf_call_base + imm32; 2080 if (tail_call_reachable) { 2081 LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth); 2082 ip += 7; 2083 } 2084 if (!imm32) 2085 return -EINVAL; 2086 ip += x86_call_depth_emit_accounting(&prog, func, ip); 2087 if (emit_call(&prog, func, ip)) 2088 return -EINVAL; 2089 break; 2090 } 2091 2092 case BPF_JMP | BPF_TAIL_CALL: 2093 if (imm32) 2094 emit_bpf_tail_call_direct(bpf_prog, 2095 &bpf_prog->aux->poke_tab[imm32 - 1], 2096 &prog, image + addrs[i - 1], 2097 callee_regs_used, 2098 bpf_prog->aux->stack_depth, 2099 ctx); 2100 else 2101 emit_bpf_tail_call_indirect(bpf_prog, 2102 &prog, 2103 callee_regs_used, 2104 bpf_prog->aux->stack_depth, 2105 image + addrs[i - 1], 2106 ctx); 2107 break; 2108 2109 /* cond jump */ 2110 case BPF_JMP | BPF_JEQ | BPF_X: 2111 case BPF_JMP | BPF_JNE | BPF_X: 2112 case BPF_JMP | BPF_JGT | BPF_X: 2113 case BPF_JMP | BPF_JLT | BPF_X: 2114 case BPF_JMP | BPF_JGE | BPF_X: 2115 case BPF_JMP | BPF_JLE | BPF_X: 2116 case BPF_JMP | BPF_JSGT | BPF_X: 2117 case BPF_JMP | BPF_JSLT | BPF_X: 2118 case BPF_JMP | BPF_JSGE | BPF_X: 2119 case BPF_JMP | BPF_JSLE | BPF_X: 2120 case BPF_JMP32 | BPF_JEQ | BPF_X: 2121 case BPF_JMP32 | BPF_JNE | BPF_X: 2122 case BPF_JMP32 | BPF_JGT | BPF_X: 2123 case BPF_JMP32 | BPF_JLT | BPF_X: 2124 case BPF_JMP32 | BPF_JGE | BPF_X: 2125 case BPF_JMP32 | BPF_JLE | BPF_X: 2126 case BPF_JMP32 | BPF_JSGT | BPF_X: 2127 case BPF_JMP32 | BPF_JSLT | BPF_X: 2128 case BPF_JMP32 | BPF_JSGE | BPF_X: 2129 case BPF_JMP32 | BPF_JSLE | BPF_X: 2130 /* cmp dst_reg, src_reg */ 2131 maybe_emit_mod(&prog, dst_reg, src_reg, 2132 BPF_CLASS(insn->code) == BPF_JMP); 2133 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); 2134 goto emit_cond_jmp; 2135 2136 case BPF_JMP | BPF_JSET | BPF_X: 2137 case BPF_JMP32 | BPF_JSET | BPF_X: 2138 /* test dst_reg, src_reg */ 2139 maybe_emit_mod(&prog, dst_reg, src_reg, 2140 BPF_CLASS(insn->code) == BPF_JMP); 2141 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); 2142 goto emit_cond_jmp; 2143 2144 case BPF_JMP | BPF_JSET | BPF_K: 2145 case BPF_JMP32 | BPF_JSET | BPF_K: 2146 /* test dst_reg, imm32 */ 2147 maybe_emit_1mod(&prog, dst_reg, 2148 BPF_CLASS(insn->code) == BPF_JMP); 2149 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 2150 goto emit_cond_jmp; 2151 2152 case BPF_JMP | BPF_JEQ | BPF_K: 2153 case BPF_JMP | BPF_JNE | BPF_K: 2154 case BPF_JMP | BPF_JGT | BPF_K: 2155 case BPF_JMP | BPF_JLT | BPF_K: 2156 case BPF_JMP | BPF_JGE | BPF_K: 2157 case BPF_JMP | BPF_JLE | BPF_K: 2158 case BPF_JMP | BPF_JSGT | BPF_K: 2159 case BPF_JMP | BPF_JSLT | BPF_K: 2160 case BPF_JMP | BPF_JSGE | BPF_K: 2161 case BPF_JMP | BPF_JSLE | BPF_K: 2162 case BPF_JMP32 | BPF_JEQ | BPF_K: 2163 case BPF_JMP32 | BPF_JNE | BPF_K: 
2164 case BPF_JMP32 | BPF_JGT | BPF_K: 2165 case BPF_JMP32 | BPF_JLT | BPF_K: 2166 case BPF_JMP32 | BPF_JGE | BPF_K: 2167 case BPF_JMP32 | BPF_JLE | BPF_K: 2168 case BPF_JMP32 | BPF_JSGT | BPF_K: 2169 case BPF_JMP32 | BPF_JSLT | BPF_K: 2170 case BPF_JMP32 | BPF_JSGE | BPF_K: 2171 case BPF_JMP32 | BPF_JSLE | BPF_K: 2172 /* test dst_reg, dst_reg to save one extra byte */ 2173 if (imm32 == 0) { 2174 maybe_emit_mod(&prog, dst_reg, dst_reg, 2175 BPF_CLASS(insn->code) == BPF_JMP); 2176 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 2177 goto emit_cond_jmp; 2178 } 2179 2180 /* cmp dst_reg, imm8/32 */ 2181 maybe_emit_1mod(&prog, dst_reg, 2182 BPF_CLASS(insn->code) == BPF_JMP); 2183 2184 if (is_imm8(imm32)) 2185 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 2186 else 2187 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 2188 2189 emit_cond_jmp: /* Convert BPF opcode to x86 */ 2190 switch (BPF_OP(insn->code)) { 2191 case BPF_JEQ: 2192 jmp_cond = X86_JE; 2193 break; 2194 case BPF_JSET: 2195 case BPF_JNE: 2196 jmp_cond = X86_JNE; 2197 break; 2198 case BPF_JGT: 2199 /* GT is unsigned '>', JA in x86 */ 2200 jmp_cond = X86_JA; 2201 break; 2202 case BPF_JLT: 2203 /* LT is unsigned '<', JB in x86 */ 2204 jmp_cond = X86_JB; 2205 break; 2206 case BPF_JGE: 2207 /* GE is unsigned '>=', JAE in x86 */ 2208 jmp_cond = X86_JAE; 2209 break; 2210 case BPF_JLE: 2211 /* LE is unsigned '<=', JBE in x86 */ 2212 jmp_cond = X86_JBE; 2213 break; 2214 case BPF_JSGT: 2215 /* Signed '>', GT in x86 */ 2216 jmp_cond = X86_JG; 2217 break; 2218 case BPF_JSLT: 2219 /* Signed '<', LT in x86 */ 2220 jmp_cond = X86_JL; 2221 break; 2222 case BPF_JSGE: 2223 /* Signed '>=', GE in x86 */ 2224 jmp_cond = X86_JGE; 2225 break; 2226 case BPF_JSLE: 2227 /* Signed '<=', LE in x86 */ 2228 jmp_cond = X86_JLE; 2229 break; 2230 default: /* to silence GCC warning */ 2231 return -EFAULT; 2232 } 2233 jmp_offset = addrs[i + insn->off] - addrs[i]; 2234 if (is_imm8(jmp_offset)) { 2235 if (jmp_padding) { 2236 /* To keep the jmp_offset valid, the extra bytes are 2237 * padded before the jump insn, so we subtract the 2238 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. 2239 * 2240 * If the previous pass already emits an imm8 2241 * jmp_cond, then this BPF insn won't shrink, so 2242 * "nops" is 0. 2243 * 2244 * On the other hand, if the previous pass emits an 2245 * imm32 jmp_cond, the extra 4 bytes(*) is padded to 2246 * keep the image from shrinking further. 2247 * 2248 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond 2249 * is 2 bytes, so the size difference is 4 bytes. 2250 */ 2251 nops = INSN_SZ_DIFF - 2; 2252 if (nops != 0 && nops != 4) { 2253 pr_err("unexpected jmp_cond padding: %d bytes\n", 2254 nops); 2255 return -EFAULT; 2256 } 2257 emit_nops(&prog, nops); 2258 } 2259 EMIT2(jmp_cond, jmp_offset); 2260 } else if (is_simm32(jmp_offset)) { 2261 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 2262 } else { 2263 pr_err("cond_jmp gen bug %llx\n", jmp_offset); 2264 return -EFAULT; 2265 } 2266 2267 break; 2268 2269 case BPF_JMP | BPF_JA: 2270 case BPF_JMP32 | BPF_JA: 2271 if (BPF_CLASS(insn->code) == BPF_JMP) { 2272 if (insn->off == -1) 2273 /* -1 jmp instructions will always jump 2274 * backwards two bytes. Explicitly handling 2275 * this case avoids wasting too many passes 2276 * when there are long sequences of replaced 2277 * dead code. 
2278 */ 2279 jmp_offset = -2; 2280 else 2281 jmp_offset = addrs[i + insn->off] - addrs[i]; 2282 } else { 2283 if (insn->imm == -1) 2284 jmp_offset = -2; 2285 else 2286 jmp_offset = addrs[i + insn->imm] - addrs[i]; 2287 } 2288 2289 if (!jmp_offset) { 2290 /* 2291 * If jmp_padding is enabled, the extra nops will 2292 * be inserted. Otherwise, optimize out nop jumps. 2293 */ 2294 if (jmp_padding) { 2295 /* There are 3 possible conditions. 2296 * (1) This BPF_JA is already optimized out in 2297 * the previous run, so there is no need 2298 * to pad any extra byte (0 byte). 2299 * (2) The previous pass emits an imm8 jmp, 2300 * so we pad 2 bytes to match the previous 2301 * insn size. 2302 * (3) Similarly, the previous pass emits an 2303 * imm32 jmp, and 5 bytes is padded. 2304 */ 2305 nops = INSN_SZ_DIFF; 2306 if (nops != 0 && nops != 2 && nops != 5) { 2307 pr_err("unexpected nop jump padding: %d bytes\n", 2308 nops); 2309 return -EFAULT; 2310 } 2311 emit_nops(&prog, nops); 2312 } 2313 break; 2314 } 2315 emit_jmp: 2316 if (is_imm8(jmp_offset)) { 2317 if (jmp_padding) { 2318 /* To avoid breaking jmp_offset, the extra bytes 2319 * are padded before the actual jmp insn, so 2320 * 2 bytes is subtracted from INSN_SZ_DIFF. 2321 * 2322 * If the previous pass already emits an imm8 2323 * jmp, there is nothing to pad (0 byte). 2324 * 2325 * If it emits an imm32 jmp (5 bytes) previously 2326 * and now an imm8 jmp (2 bytes), then we pad 2327 * (5 - 2 = 3) bytes to stop the image from 2328 * shrinking further. 2329 */ 2330 nops = INSN_SZ_DIFF - 2; 2331 if (nops != 0 && nops != 3) { 2332 pr_err("unexpected jump padding: %d bytes\n", 2333 nops); 2334 return -EFAULT; 2335 } 2336 emit_nops(&prog, INSN_SZ_DIFF - 2); 2337 } 2338 EMIT2(0xEB, jmp_offset); 2339 } else if (is_simm32(jmp_offset)) { 2340 EMIT1_off32(0xE9, jmp_offset); 2341 } else { 2342 pr_err("jmp gen bug %llx\n", jmp_offset); 2343 return -EFAULT; 2344 } 2345 break; 2346 2347 case BPF_JMP | BPF_EXIT: 2348 if (seen_exit) { 2349 jmp_offset = ctx->cleanup_addr - addrs[i]; 2350 goto emit_jmp; 2351 } 2352 seen_exit = true; 2353 /* Update cleanup_addr */ 2354 ctx->cleanup_addr = proglen; 2355 if (bpf_prog->aux->exception_boundary) { 2356 pop_callee_regs(&prog, all_callee_regs_used); 2357 pop_r12(&prog); 2358 } else { 2359 pop_callee_regs(&prog, callee_regs_used); 2360 if (arena_vm_start) 2361 pop_r12(&prog); 2362 } 2363 EMIT1(0xC9); /* leave */ 2364 emit_return(&prog, image + addrs[i - 1] + (prog - temp)); 2365 break; 2366 2367 default: 2368 /* 2369 * By design x86-64 JIT should support all BPF instructions. 2370 * This error will be seen if new instruction was added 2371 * to the interpreter, but not to the JIT, or if there is 2372 * junk in bpf_prog. 2373 */ 2374 pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 2375 return -EINVAL; 2376 } 2377 2378 ilen = prog - temp; 2379 if (ilen > BPF_MAX_INSN_SIZE) { 2380 pr_err("bpf_jit: fatal insn size error\n"); 2381 return -EFAULT; 2382 } 2383 2384 if (image) { 2385 /* 2386 * When populating the image, assert that: 2387 * 2388 * i) We do not write beyond the allocated space, and 2389 * ii) addrs[i] did not change from the prior run, in order 2390 * to validate assumptions made for computing branch 2391 * displacements. 
2392 */ 2393 if (unlikely(proglen + ilen > oldproglen || 2394 proglen + ilen != addrs[i])) { 2395 pr_err("bpf_jit: fatal error\n"); 2396 return -EFAULT; 2397 } 2398 memcpy(rw_image + proglen, temp, ilen); 2399 } 2400 proglen += ilen; 2401 addrs[i] = proglen; 2402 prog = temp; 2403 } 2404 2405 if (image && excnt != bpf_prog->aux->num_exentries) { 2406 pr_err("extable is not populated\n"); 2407 return -EFAULT; 2408 } 2409 return proglen; 2410 } 2411 2412 static void clean_stack_garbage(const struct btf_func_model *m, 2413 u8 **pprog, int nr_stack_slots, 2414 int stack_size) 2415 { 2416 int arg_size, off; 2417 u8 *prog; 2418 2419 /* Generally speaking, the compiler will pass the arguments 2420 * on-stack with "push" instruction, which will take 8-byte 2421 * on the stack. In this case, there won't be garbage values 2422 * while we copy the arguments from origin stack frame to current 2423 * in BPF_DW. 2424 * 2425 * However, sometimes the compiler will only allocate 4-byte on 2426 * the stack for the arguments. For now, this case will only 2427 * happen if there is only one argument on-stack and its size 2428 * not more than 4 byte. In this case, there will be garbage 2429 * values on the upper 4-byte where we store the argument on 2430 * current stack frame. 2431 * 2432 * arguments on origin stack: 2433 * 2434 * stack_arg_1(4-byte) xxx(4-byte) 2435 * 2436 * what we copy: 2437 * 2438 * stack_arg_1(8-byte): stack_arg_1(origin) xxx 2439 * 2440 * and the xxx is the garbage values which we should clean here. 2441 */ 2442 if (nr_stack_slots != 1) 2443 return; 2444 2445 /* the size of the last argument */ 2446 arg_size = m->arg_size[m->nr_args - 1]; 2447 if (arg_size <= 4) { 2448 off = -(stack_size - 4); 2449 prog = *pprog; 2450 /* mov DWORD PTR [rbp + off], 0 */ 2451 if (!is_imm8(off)) 2452 EMIT2_off32(0xC7, 0x85, off); 2453 else 2454 EMIT3(0xC7, 0x45, off); 2455 EMIT(0, 4); 2456 *pprog = prog; 2457 } 2458 } 2459 2460 /* get the count of the regs that are used to pass arguments */ 2461 static int get_nr_used_regs(const struct btf_func_model *m) 2462 { 2463 int i, arg_regs, nr_used_regs = 0; 2464 2465 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { 2466 arg_regs = (m->arg_size[i] + 7) / 8; 2467 if (nr_used_regs + arg_regs <= 6) 2468 nr_used_regs += arg_regs; 2469 2470 if (nr_used_regs >= 6) 2471 break; 2472 } 2473 2474 return nr_used_regs; 2475 } 2476 2477 static void save_args(const struct btf_func_model *m, u8 **prog, 2478 int stack_size, bool for_call_origin) 2479 { 2480 int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0; 2481 int i, j; 2482 2483 /* Store function arguments to stack. 2484 * For a function that accepts two pointers the sequence will be: 2485 * mov QWORD PTR [rbp-0x10],rdi 2486 * mov QWORD PTR [rbp-0x8],rsi 2487 */ 2488 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { 2489 arg_regs = (m->arg_size[i] + 7) / 8; 2490 2491 /* According to the research of Yonghong, struct members 2492 * should be all in register or all on the stack. 2493 * Meanwhile, the compiler will pass the argument on regs 2494 * if the remaining regs can hold the argument. 2495 * 2496 * Disorder of the args can happen. For example: 2497 * 2498 * struct foo_struct { 2499 * long a; 2500 * int b; 2501 * }; 2502 * int foo(char, char, char, char, char, struct foo_struct, 2503 * char); 2504 * 2505 * the arg1-5,arg7 will be passed by regs, and arg6 will 2506 * by stack. 
2507 */ 2508 if (nr_regs + arg_regs > 6) { 2509 /* copy function arguments from origin stack frame 2510 * into current stack frame. 2511 * 2512 * The starting address of the arguments on-stack 2513 * is: 2514 * rbp + 8(push rbp) + 2515 * 8(return addr of origin call) + 2516 * 8(return addr of the caller) 2517 * which means: rbp + 24 2518 */ 2519 for (j = 0; j < arg_regs; j++) { 2520 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 2521 nr_stack_slots * 8 + 0x18); 2522 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0, 2523 -stack_size); 2524 2525 if (!nr_stack_slots) 2526 first_off = stack_size; 2527 stack_size -= 8; 2528 nr_stack_slots++; 2529 } 2530 } else { 2531 /* Only copy the arguments on-stack to current 2532 * 'stack_size' and ignore the regs, used to 2533 * prepare the arguments on-stack for origin call. 2534 */ 2535 if (for_call_origin) { 2536 nr_regs += arg_regs; 2537 continue; 2538 } 2539 2540 /* copy the arguments from regs into stack */ 2541 for (j = 0; j < arg_regs; j++) { 2542 emit_stx(prog, BPF_DW, BPF_REG_FP, 2543 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, 2544 -stack_size); 2545 stack_size -= 8; 2546 nr_regs++; 2547 } 2548 } 2549 } 2550 2551 clean_stack_garbage(m, prog, nr_stack_slots, first_off); 2552 } 2553 2554 static void restore_regs(const struct btf_func_model *m, u8 **prog, 2555 int stack_size) 2556 { 2557 int i, j, arg_regs, nr_regs = 0; 2558 2559 /* Restore function arguments from stack. 2560 * For a function that accepts two pointers the sequence will be: 2561 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] 2562 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] 2563 * 2564 * The logic here is similar to what we do in save_args() 2565 */ 2566 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { 2567 arg_regs = (m->arg_size[i] + 7) / 8; 2568 if (nr_regs + arg_regs <= 6) { 2569 for (j = 0; j < arg_regs; j++) { 2570 emit_ldx(prog, BPF_DW, 2571 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, 2572 BPF_REG_FP, 2573 -stack_size); 2574 stack_size -= 8; 2575 nr_regs++; 2576 } 2577 } else { 2578 stack_size -= 8 * arg_regs; 2579 } 2580 2581 if (nr_regs >= 6) 2582 break; 2583 } 2584 } 2585 2586 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, 2587 struct bpf_tramp_link *l, int stack_size, 2588 int run_ctx_off, bool save_ret, 2589 void *image, void *rw_image) 2590 { 2591 u8 *prog = *pprog; 2592 u8 *jmp_insn; 2593 int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); 2594 struct bpf_prog *p = l->link.prog; 2595 u64 cookie = l->cookie; 2596 2597 /* mov rdi, cookie */ 2598 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie); 2599 2600 /* Prepare struct bpf_tramp_run_ctx. 2601 * 2602 * bpf_tramp_run_ctx is already preserved by 2603 * arch_prepare_bpf_trampoline(). 
2604 * 2605 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi 2606 */ 2607 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off); 2608 2609 /* arg1: mov rdi, progs[i] */ 2610 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 2611 /* arg2: lea rsi, [rbp - ctx_cookie_off] */ 2612 if (!is_imm8(-run_ctx_off)) 2613 EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off); 2614 else 2615 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); 2616 2617 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image))) 2618 return -EINVAL; 2619 /* remember prog start time returned by __bpf_prog_enter */ 2620 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); 2621 2622 /* if (__bpf_prog_enter*(prog) == 0) 2623 * goto skip_exec_of_prog; 2624 */ 2625 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ 2626 /* emit 2 nops that will be replaced with JE insn */ 2627 jmp_insn = prog; 2628 emit_nops(&prog, 2); 2629 2630 /* arg1: lea rdi, [rbp - stack_size] */ 2631 if (!is_imm8(-stack_size)) 2632 EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size); 2633 else 2634 EMIT4(0x48, 0x8D, 0x7D, -stack_size); 2635 /* arg2: progs[i]->insnsi for interpreter */ 2636 if (!p->jited) 2637 emit_mov_imm64(&prog, BPF_REG_2, 2638 (long) p->insnsi >> 32, 2639 (u32) (long) p->insnsi); 2640 /* call JITed bpf program or interpreter */ 2641 if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image))) 2642 return -EINVAL; 2643 2644 /* 2645 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return 2646 * of the previous call which is then passed on the stack to 2647 * the next BPF program. 2648 * 2649 * BPF_TRAMP_FENTRY trampoline may need to return the return 2650 * value of BPF_PROG_TYPE_STRUCT_OPS prog. 2651 */ 2652 if (save_ret) 2653 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2654 2655 /* replace 2 nops with JE insn, since jmp target is known */ 2656 jmp_insn[0] = X86_JE; 2657 jmp_insn[1] = prog - jmp_insn - 2; 2658 2659 /* arg1: mov rdi, progs[i] */ 2660 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 2661 /* arg2: mov rsi, rbx <- start time in nsec */ 2662 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); 2663 /* arg3: lea rdx, [rbp - run_ctx_off] */ 2664 if (!is_imm8(-run_ctx_off)) 2665 EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off); 2666 else 2667 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); 2668 if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image))) 2669 return -EINVAL; 2670 2671 *pprog = prog; 2672 return 0; 2673 } 2674 2675 static void emit_align(u8 **pprog, u32 align) 2676 { 2677 u8 *target, *prog = *pprog; 2678 2679 target = PTR_ALIGN(prog, align); 2680 if (target != prog) 2681 emit_nops(&prog, target - prog); 2682 2683 *pprog = prog; 2684 } 2685 2686 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) 2687 { 2688 u8 *prog = *pprog; 2689 s64 offset; 2690 2691 offset = func - (ip + 2 + 4); 2692 if (!is_simm32(offset)) { 2693 pr_err("Target %p is out of range\n", func); 2694 return -EINVAL; 2695 } 2696 EMIT2_off32(0x0F, jmp_cond + 0x10, offset); 2697 *pprog = prog; 2698 return 0; 2699 } 2700 2701 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, 2702 struct bpf_tramp_links *tl, int stack_size, 2703 int run_ctx_off, bool save_ret, 2704 void *image, void *rw_image) 2705 { 2706 int i; 2707 u8 *prog = *pprog; 2708 2709 for (i = 0; i < tl->nr_links; i++) { 2710 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, 2711 run_ctx_off, save_ret, image, rw_image)) 2712 return -EINVAL; 2713 } 2714 *pprog = prog; 
2715 return 0; 2716 } 2717 2718 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, 2719 struct bpf_tramp_links *tl, int stack_size, 2720 int run_ctx_off, u8 **branches, 2721 void *image, void *rw_image) 2722 { 2723 u8 *prog = *pprog; 2724 int i; 2725 2726 /* The first fmod_ret program will receive a garbage return value. 2727 * Set this to 0 to avoid confusing the program. 2728 */ 2729 emit_mov_imm32(&prog, false, BPF_REG_0, 0); 2730 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2731 for (i = 0; i < tl->nr_links; i++) { 2732 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true, 2733 image, rw_image)) 2734 return -EINVAL; 2735 2736 /* mod_ret prog stored return value into [rbp - 8]. Emit: 2737 * if (*(u64 *)(rbp - 8) != 0) 2738 * goto do_fexit; 2739 */ 2740 /* cmp QWORD PTR [rbp - 0x8], 0x0 */ 2741 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); 2742 2743 /* Save the location of the branch and Generate 6 nops 2744 * (4 bytes for an offset and 2 bytes for the jump) These nops 2745 * are replaced with a conditional jump once do_fexit (i.e. the 2746 * start of the fexit invocation) is finalized. 2747 */ 2748 branches[i] = prog; 2749 emit_nops(&prog, 4 + 2); 2750 } 2751 2752 *pprog = prog; 2753 return 0; 2754 } 2755 2756 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ 2757 #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \ 2758 __LOAD_TCC_PTR(-round_up(stack, 8) - 8) 2759 2760 /* Example: 2761 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 2762 * its 'struct btf_func_model' will be nr_args=2 2763 * The assembly code when eth_type_trans is executing after trampoline: 2764 * 2765 * push rbp 2766 * mov rbp, rsp 2767 * sub rsp, 16 // space for skb and dev 2768 * push rbx // temp regs to pass start time 2769 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack 2770 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack 2771 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 2772 * mov rbx, rax // remember start time in bpf stats are enabled 2773 * lea rdi, [rbp - 16] // R1==ctx of bpf prog 2774 * call addr_of_jited_FENTRY_prog 2775 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 2776 * mov rsi, rbx // prog start time 2777 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 2778 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack 2779 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack 2780 * pop rbx 2781 * leave 2782 * ret 2783 * 2784 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be 2785 * replaced with 'call generated_bpf_trampoline'. When it returns 2786 * eth_type_trans will continue executing with original skb and dev pointers. 
2787 * 2788 * The assembly code when eth_type_trans is called from trampoline: 2789 * 2790 * push rbp 2791 * mov rbp, rsp 2792 * sub rsp, 24 // space for skb, dev, return value 2793 * push rbx // temp regs to pass start time 2794 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack 2795 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack 2796 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 2797 * mov rbx, rax // remember start time if bpf stats are enabled 2798 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 2799 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev 2800 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 2801 * mov rsi, rbx // prog start time 2802 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 2803 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack 2804 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack 2805 * call eth_type_trans+5 // execute body of eth_type_trans 2806 * mov qword ptr [rbp - 8], rax // save return value 2807 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 2808 * mov rbx, rax // remember start time in bpf stats are enabled 2809 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 2810 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value 2811 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 2812 * mov rsi, rbx // prog start time 2813 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 2814 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value 2815 * pop rbx 2816 * leave 2817 * add rsp, 8 // skip eth_type_trans's frame 2818 * ret // return to its caller 2819 */ 2820 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image, 2821 void *rw_image_end, void *image, 2822 const struct btf_func_model *m, u32 flags, 2823 struct bpf_tramp_links *tlinks, 2824 void *func_addr) 2825 { 2826 int i, ret, nr_regs = m->nr_args, stack_size = 0; 2827 int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off; 2828 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; 2829 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 2830 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 2831 void *orig_call = func_addr; 2832 u8 **branches = NULL; 2833 u8 *prog; 2834 bool save_ret; 2835 2836 /* 2837 * F_INDIRECT is only compatible with F_RET_FENTRY_RET, it is 2838 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG 2839 * because @func_addr. 2840 */ 2841 WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) && 2842 (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET))); 2843 2844 /* extra registers for struct arguments */ 2845 for (i = 0; i < m->nr_args; i++) { 2846 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) 2847 nr_regs += (m->arg_size[i] + 7) / 8 - 1; 2848 } 2849 2850 /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6 2851 * are passed through regs, the remains are through stack. 2852 */ 2853 if (nr_regs > MAX_BPF_FUNC_ARGS) 2854 return -ENOTSUPP; 2855 2856 /* Generated trampoline stack layout: 2857 * 2858 * RBP + 8 [ return address ] 2859 * RBP + 0 [ RBP ] 2860 * 2861 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or 2862 * BPF_TRAMP_F_RET_FENTRY_RET flags 2863 * 2864 * [ reg_argN ] always 2865 * [ ... 
] 2866 * RBP - regs_off [ reg_arg1 ] program's ctx pointer 2867 * 2868 * RBP - nregs_off [ regs count ] always 2869 * 2870 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag 2871 * 2872 * RBP - rbx_off [ rbx value ] always 2873 * 2874 * RBP - run_ctx_off [ bpf_tramp_run_ctx ] 2875 * 2876 * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG 2877 * [ ... ] 2878 * [ stack_arg2 ] 2879 * RBP - arg_stack_off [ stack_arg1 ] 2880 * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX 2881 */ 2882 2883 /* room for return value of orig_call or fentry prog */ 2884 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); 2885 if (save_ret) 2886 stack_size += 8; 2887 2888 stack_size += nr_regs * 8; 2889 regs_off = stack_size; 2890 2891 /* regs count */ 2892 stack_size += 8; 2893 nregs_off = stack_size; 2894 2895 if (flags & BPF_TRAMP_F_IP_ARG) 2896 stack_size += 8; /* room for IP address argument */ 2897 2898 ip_off = stack_size; 2899 2900 stack_size += 8; 2901 rbx_off = stack_size; 2902 2903 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7; 2904 run_ctx_off = stack_size; 2905 2906 if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) { 2907 /* the space that used to pass arguments on-stack */ 2908 stack_size += (nr_regs - get_nr_used_regs(m)) * 8; 2909 /* make sure the stack pointer is 16-byte aligned if we 2910 * need pass arguments on stack, which means 2911 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)] 2912 * should be 16-byte aligned. Following code depend on 2913 * that stack_size is already 8-byte aligned. 2914 */ 2915 stack_size += (stack_size % 16) ? 0 : 8; 2916 } 2917 2918 arg_stack_off = stack_size; 2919 2920 if (flags & BPF_TRAMP_F_SKIP_FRAME) { 2921 /* skip patched call instruction and point orig_call to actual 2922 * body of the kernel function. 2923 */ 2924 if (is_endbr(*(u32 *)orig_call)) 2925 orig_call += ENDBR_INSN_SIZE; 2926 orig_call += X86_PATCH_SIZE; 2927 } 2928 2929 prog = rw_image; 2930 2931 if (flags & BPF_TRAMP_F_INDIRECT) { 2932 /* 2933 * Indirect call for bpf_struct_ops 2934 */ 2935 emit_cfi(&prog, cfi_get_func_hash(func_addr)); 2936 } else { 2937 /* 2938 * Direct-call fentry stub, as such it needs accounting for the 2939 * __fentry__ call. 
2940 */ 2941 x86_call_depth_emit_accounting(&prog, NULL, image); 2942 } 2943 EMIT1(0x55); /* push rbp */ 2944 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 2945 if (!is_imm8(stack_size)) { 2946 /* sub rsp, stack_size */ 2947 EMIT3_off32(0x48, 0x81, 0xEC, stack_size); 2948 } else { 2949 /* sub rsp, stack_size */ 2950 EMIT4(0x48, 0x83, 0xEC, stack_size); 2951 } 2952 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) 2953 EMIT1(0x50); /* push rax */ 2954 /* mov QWORD PTR [rbp - rbx_off], rbx */ 2955 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off); 2956 2957 /* Store number of argument registers of the traced function: 2958 * mov rax, nr_regs 2959 * mov QWORD PTR [rbp - nregs_off], rax 2960 */ 2961 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs); 2962 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off); 2963 2964 if (flags & BPF_TRAMP_F_IP_ARG) { 2965 /* Store IP address of the traced function: 2966 * movabsq rax, func_addr 2967 * mov QWORD PTR [rbp - ip_off], rax 2968 */ 2969 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr); 2970 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); 2971 } 2972 2973 save_args(m, &prog, regs_off, false); 2974 2975 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2976 /* arg1: mov rdi, im */ 2977 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2978 if (emit_rsb_call(&prog, __bpf_tramp_enter, 2979 image + (prog - (u8 *)rw_image))) { 2980 ret = -EINVAL; 2981 goto cleanup; 2982 } 2983 } 2984 2985 if (fentry->nr_links) { 2986 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, 2987 flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image)) 2988 return -EINVAL; 2989 } 2990 2991 if (fmod_ret->nr_links) { 2992 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), 2993 GFP_KERNEL); 2994 if (!branches) 2995 return -ENOMEM; 2996 2997 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off, 2998 run_ctx_off, branches, image, rw_image)) { 2999 ret = -EINVAL; 3000 goto cleanup; 3001 } 3002 } 3003 3004 if (flags & BPF_TRAMP_F_CALL_ORIG) { 3005 restore_regs(m, &prog, regs_off); 3006 save_args(m, &prog, arg_stack_off, true); 3007 3008 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { 3009 /* Before calling the original function, load the 3010 * tail_call_cnt_ptr from stack to rax. 3011 */ 3012 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); 3013 } 3014 3015 if (flags & BPF_TRAMP_F_ORIG_STACK) { 3016 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); 3017 EMIT2(0xff, 0xd3); /* call *rbx */ 3018 } else { 3019 /* call original function */ 3020 if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) { 3021 ret = -EINVAL; 3022 goto cleanup; 3023 } 3024 } 3025 /* remember return value in a stack for bpf prog to access */ 3026 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 3027 im->ip_after_call = image + (prog - (u8 *)rw_image); 3028 emit_nops(&prog, X86_PATCH_SIZE); 3029 } 3030 3031 if (fmod_ret->nr_links) { 3032 /* From Intel 64 and IA-32 Architectures Optimization 3033 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 3034 * Coding Rule 11: All branch targets should be 16-byte 3035 * aligned. 3036 */ 3037 emit_align(&prog, 16); 3038 /* Update the branches saved in invoke_bpf_mod_ret with the 3039 * aligned address of do_fexit. 
3040 */ 3041 for (i = 0; i < fmod_ret->nr_links; i++) { 3042 emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image), 3043 image + (branches[i] - (u8 *)rw_image), X86_JNE); 3044 } 3045 } 3046 3047 if (fexit->nr_links) { 3048 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, 3049 false, image, rw_image)) { 3050 ret = -EINVAL; 3051 goto cleanup; 3052 } 3053 } 3054 3055 if (flags & BPF_TRAMP_F_RESTORE_REGS) 3056 restore_regs(m, &prog, regs_off); 3057 3058 /* This needs to be done regardless. If there were fmod_ret programs, 3059 * the return value is only updated on the stack and still needs to be 3060 * restored to R0. 3061 */ 3062 if (flags & BPF_TRAMP_F_CALL_ORIG) { 3063 im->ip_epilogue = image + (prog - (u8 *)rw_image); 3064 /* arg1: mov rdi, im */ 3065 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 3066 if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) { 3067 ret = -EINVAL; 3068 goto cleanup; 3069 } 3070 } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { 3071 /* Before running the original function, load the 3072 * tail_call_cnt_ptr from stack to rax. 3073 */ 3074 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); 3075 } 3076 3077 /* restore return value of orig_call or fentry prog back into RAX */ 3078 if (save_ret) 3079 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 3080 3081 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off); 3082 EMIT1(0xC9); /* leave */ 3083 if (flags & BPF_TRAMP_F_SKIP_FRAME) { 3084 /* skip our return address and return to parent */ 3085 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ 3086 } 3087 emit_return(&prog, image + (prog - (u8 *)rw_image)); 3088 /* Make sure the trampoline generation logic doesn't overflow */ 3089 if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) { 3090 ret = -EFAULT; 3091 goto cleanup; 3092 } 3093 ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY; 3094 3095 cleanup: 3096 kfree(branches); 3097 return ret; 3098 } 3099 3100 void *arch_alloc_bpf_trampoline(unsigned int size) 3101 { 3102 return bpf_prog_pack_alloc(size, jit_fill_hole); 3103 } 3104 3105 void arch_free_bpf_trampoline(void *image, unsigned int size) 3106 { 3107 bpf_prog_pack_free(image, size); 3108 } 3109 3110 int arch_protect_bpf_trampoline(void *image, unsigned int size) 3111 { 3112 return 0; 3113 } 3114 3115 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, 3116 const struct btf_func_model *m, u32 flags, 3117 struct bpf_tramp_links *tlinks, 3118 void *func_addr) 3119 { 3120 void *rw_image, *tmp; 3121 int ret; 3122 u32 size = image_end - image; 3123 3124 /* rw_image doesn't need to be in module memory range, so we can 3125 * use kvmalloc. 3126 */ 3127 rw_image = kvmalloc(size, GFP_KERNEL); 3128 if (!rw_image) 3129 return -ENOMEM; 3130 3131 ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m, 3132 flags, tlinks, func_addr); 3133 if (ret < 0) 3134 goto out; 3135 3136 tmp = bpf_arch_text_copy(image, rw_image, size); 3137 if (IS_ERR(tmp)) 3138 ret = PTR_ERR(tmp); 3139 out: 3140 kvfree(rw_image); 3141 return ret; 3142 } 3143 3144 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, 3145 struct bpf_tramp_links *tlinks, void *func_addr) 3146 { 3147 struct bpf_tramp_image im; 3148 void *image; 3149 int ret; 3150 3151 /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline(). 3152 * This will NOT cause fragmentation in direct map, as we do not 3153 * call set_memory_*() on this buffer. 
3154 * 3155 * We cannot use kvmalloc here, because we need image to be in 3156 * module memory range. 3157 */ 3158 image = bpf_jit_alloc_exec(PAGE_SIZE); 3159 if (!image) 3160 return -ENOMEM; 3161 3162 ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image, 3163 m, flags, tlinks, func_addr); 3164 bpf_jit_free_exec(image); 3165 return ret; 3166 } 3167 3168 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf) 3169 { 3170 u8 *jg_reloc, *prog = *pprog; 3171 int pivot, err, jg_bytes = 1; 3172 s64 jg_offset; 3173 3174 if (a == b) { 3175 /* Leaf node of recursion, i.e. not a range of indices 3176 * anymore. 3177 */ 3178 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 3179 if (!is_simm32(progs[a])) 3180 return -1; 3181 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), 3182 progs[a]); 3183 err = emit_cond_near_jump(&prog, /* je func */ 3184 (void *)progs[a], image + (prog - buf), 3185 X86_JE); 3186 if (err) 3187 return err; 3188 3189 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf)); 3190 3191 *pprog = prog; 3192 return 0; 3193 } 3194 3195 /* Not a leaf node, so we pivot, and recursively descend into 3196 * the lower and upper ranges. 3197 */ 3198 pivot = (b - a) / 2; 3199 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 3200 if (!is_simm32(progs[a + pivot])) 3201 return -1; 3202 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); 3203 3204 if (pivot > 2) { /* jg upper_part */ 3205 /* Require near jump. */ 3206 jg_bytes = 4; 3207 EMIT2_off32(0x0F, X86_JG + 0x10, 0); 3208 } else { 3209 EMIT2(X86_JG, 0); 3210 } 3211 jg_reloc = prog; 3212 3213 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ 3214 progs, image, buf); 3215 if (err) 3216 return err; 3217 3218 /* From Intel 64 and IA-32 Architectures Optimization 3219 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 3220 * Coding Rule 11: All branch targets should be 16-byte 3221 * aligned. 
3222 */ 3223 emit_align(&prog, 16); 3224 jg_offset = prog - jg_reloc; 3225 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); 3226 3227 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ 3228 b, progs, image, buf); 3229 if (err) 3230 return err; 3231 3232 *pprog = prog; 3233 return 0; 3234 } 3235 3236 static int cmp_ips(const void *a, const void *b) 3237 { 3238 const s64 *ipa = a; 3239 const s64 *ipb = b; 3240 3241 if (*ipa > *ipb) 3242 return 1; 3243 if (*ipa < *ipb) 3244 return -1; 3245 return 0; 3246 } 3247 3248 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs) 3249 { 3250 u8 *prog = buf; 3251 3252 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); 3253 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf); 3254 } 3255 3256 struct x64_jit_data { 3257 struct bpf_binary_header *rw_header; 3258 struct bpf_binary_header *header; 3259 int *addrs; 3260 u8 *image; 3261 int proglen; 3262 struct jit_context ctx; 3263 }; 3264 3265 #define MAX_PASSES 20 3266 #define PADDING_PASSES (MAX_PASSES - 5) 3267 3268 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 3269 { 3270 struct bpf_binary_header *rw_header = NULL; 3271 struct bpf_binary_header *header = NULL; 3272 struct bpf_prog *tmp, *orig_prog = prog; 3273 struct x64_jit_data *jit_data; 3274 int proglen, oldproglen = 0; 3275 struct jit_context ctx = {}; 3276 bool tmp_blinded = false; 3277 bool extra_pass = false; 3278 bool padding = false; 3279 u8 *rw_image = NULL; 3280 u8 *image = NULL; 3281 int *addrs; 3282 int pass; 3283 int i; 3284 3285 if (!prog->jit_requested) 3286 return orig_prog; 3287 3288 tmp = bpf_jit_blind_constants(prog); 3289 /* 3290 * If blinding was requested and we failed during blinding, 3291 * we must fall back to the interpreter. 3292 */ 3293 if (IS_ERR(tmp)) 3294 return orig_prog; 3295 if (tmp != prog) { 3296 tmp_blinded = true; 3297 prog = tmp; 3298 } 3299 3300 jit_data = prog->aux->jit_data; 3301 if (!jit_data) { 3302 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 3303 if (!jit_data) { 3304 prog = orig_prog; 3305 goto out; 3306 } 3307 prog->aux->jit_data = jit_data; 3308 } 3309 addrs = jit_data->addrs; 3310 if (addrs) { 3311 ctx = jit_data->ctx; 3312 oldproglen = jit_data->proglen; 3313 image = jit_data->image; 3314 header = jit_data->header; 3315 rw_header = jit_data->rw_header; 3316 rw_image = (void *)rw_header + ((void *)image - (void *)header); 3317 extra_pass = true; 3318 padding = true; 3319 goto skip_init_addrs; 3320 } 3321 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 3322 if (!addrs) { 3323 prog = orig_prog; 3324 goto out_addrs; 3325 } 3326 3327 /* 3328 * Before first pass, make a rough estimation of addrs[] 3329 * each BPF instruction is translated to less than 64 bytes 3330 */ 3331 for (proglen = 0, i = 0; i <= prog->len; i++) { 3332 proglen += 64; 3333 addrs[i] = proglen; 3334 } 3335 ctx.cleanup_addr = proglen; 3336 skip_init_addrs: 3337 3338 /* 3339 * JITed image shrinks with every pass and the loop iterates 3340 * until the image stops shrinking. Very large BPF programs 3341 * may converge on the last pass. In such case do one more 3342 * pass to emit the final image. 
3343 */ 3344 for (pass = 0; pass < MAX_PASSES || image; pass++) { 3345 if (!padding && pass >= PADDING_PASSES) 3346 padding = true; 3347 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); 3348 if (proglen <= 0) { 3349 out_image: 3350 image = NULL; 3351 if (header) { 3352 bpf_arch_text_copy(&header->size, &rw_header->size, 3353 sizeof(rw_header->size)); 3354 bpf_jit_binary_pack_free(header, rw_header); 3355 } 3356 /* Fall back to interpreter mode */ 3357 prog = orig_prog; 3358 if (extra_pass) { 3359 prog->bpf_func = NULL; 3360 prog->jited = 0; 3361 prog->jited_len = 0; 3362 } 3363 goto out_addrs; 3364 } 3365 if (image) { 3366 if (proglen != oldproglen) { 3367 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 3368 proglen, oldproglen); 3369 goto out_image; 3370 } 3371 break; 3372 } 3373 if (proglen == oldproglen) { 3374 /* 3375 * The number of entries in extable is the number of BPF_LDX 3376 * insns that access kernel memory via "pointer to BTF type". 3377 * The verifier changed their opcode from LDX|MEM|size 3378 * to LDX|PROBE_MEM|size to make JITing easier. 3379 */ 3380 u32 align = __alignof__(struct exception_table_entry); 3381 u32 extable_size = prog->aux->num_exentries * 3382 sizeof(struct exception_table_entry); 3383 3384 /* allocate module memory for x86 insns and extable */ 3385 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, 3386 &image, align, &rw_header, &rw_image, 3387 jit_fill_hole); 3388 if (!header) { 3389 prog = orig_prog; 3390 goto out_addrs; 3391 } 3392 prog->aux->extable = (void *) image + roundup(proglen, align); 3393 } 3394 oldproglen = proglen; 3395 cond_resched(); 3396 } 3397 3398 if (bpf_jit_enable > 1) 3399 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image); 3400 3401 if (image) { 3402 if (!prog->is_func || extra_pass) { 3403 /* 3404 * bpf_jit_binary_pack_finalize fails in two scenarios: 3405 * 1) header is not pointing to proper module memory; 3406 * 2) the arch doesn't support bpf_arch_text_copy(). 3407 * 3408 * Both cases are serious bugs and justify WARN_ON. 3409 */ 3410 if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) { 3411 /* header has been freed */ 3412 header = NULL; 3413 goto out_image; 3414 } 3415 3416 bpf_tail_call_direct_fixup(prog); 3417 } else { 3418 jit_data->addrs = addrs; 3419 jit_data->ctx = ctx; 3420 jit_data->proglen = proglen; 3421 jit_data->image = image; 3422 jit_data->header = header; 3423 jit_data->rw_header = rw_header; 3424 } 3425 /* 3426 * ctx.prog_offset is used when CFI preambles put code *before* 3427 * the function. See emit_cfi(). For FineIBT specifically this code 3428 * can also be executed and bpf_prog_kallsyms_add() will 3429 * generate an additional symbol to cover this, hence also 3430 * decrement proglen. 3431 */ 3432 prog->bpf_func = (void *)image + cfi_get_offset(); 3433 prog->jited = 1; 3434 prog->jited_len = proglen - cfi_get_offset(); 3435 } else { 3436 prog = orig_prog; 3437 } 3438 3439 if (!image || !prog->is_func || extra_pass) { 3440 if (image) 3441 bpf_prog_fill_jited_linfo(prog, addrs + 1); 3442 out_addrs: 3443 kvfree(addrs); 3444 kfree(jit_data); 3445 prog->aux->jit_data = NULL; 3446 } 3447 out: 3448 if (tmp_blinded) 3449 bpf_jit_prog_release_other(prog, prog == orig_prog ? 
3450 tmp : orig_prog); 3451 return prog; 3452 } 3453 3454 bool bpf_jit_supports_kfunc_call(void) 3455 { 3456 return true; 3457 } 3458 3459 void *bpf_arch_text_copy(void *dst, void *src, size_t len) 3460 { 3461 if (text_poke_copy(dst, src, len) == NULL) 3462 return ERR_PTR(-EINVAL); 3463 return dst; 3464 } 3465 3466 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */ 3467 bool bpf_jit_supports_subprog_tailcalls(void) 3468 { 3469 return true; 3470 } 3471 3472 bool bpf_jit_supports_percpu_insn(void) 3473 { 3474 return true; 3475 } 3476 3477 void bpf_jit_free(struct bpf_prog *prog) 3478 { 3479 if (prog->jited) { 3480 struct x64_jit_data *jit_data = prog->aux->jit_data; 3481 struct bpf_binary_header *hdr; 3482 3483 /* 3484 * If we fail the final pass of JIT (from jit_subprogs), 3485 * the program may not be finalized yet. Call finalize here 3486 * before freeing it. 3487 */ 3488 if (jit_data) { 3489 bpf_jit_binary_pack_finalize(jit_data->header, 3490 jit_data->rw_header); 3491 kvfree(jit_data->addrs); 3492 kfree(jit_data); 3493 } 3494 prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset(); 3495 hdr = bpf_jit_binary_pack_hdr(prog); 3496 bpf_jit_binary_pack_free(hdr, NULL); 3497 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); 3498 } 3499 3500 bpf_prog_unlock_free(prog); 3501 } 3502 3503 bool bpf_jit_supports_exceptions(void) 3504 { 3505 /* We unwind through both kernel frames (starting from within bpf_throw 3506 * call) and BPF frames. Therefore we require ORC unwinder to be enabled 3507 * to walk kernel frames and reach BPF frames in the stack trace. 3508 */ 3509 return IS_ENABLED(CONFIG_UNWINDER_ORC); 3510 } 3511 3512 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) 3513 { 3514 #if defined(CONFIG_UNWINDER_ORC) 3515 struct unwind_state state; 3516 unsigned long addr; 3517 3518 for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state); 3519 unwind_next_frame(&state)) { 3520 addr = unwind_get_return_address(&state); 3521 if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp)) 3522 break; 3523 } 3524 return; 3525 #endif 3526 WARN(1, "verification of programs using bpf_throw should have failed\n"); 3527 } 3528 3529 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, 3530 struct bpf_prog *new, struct bpf_prog *old) 3531 { 3532 u8 *old_addr, *new_addr, *old_bypass_addr; 3533 int ret; 3534 3535 old_bypass_addr = old ? NULL : poke->bypass_addr; 3536 old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL; 3537 new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL; 3538 3539 /* 3540 * On program loading or teardown, the program's kallsym entry 3541 * might not be in place, so we use __bpf_arch_text_poke to skip 3542 * the kallsyms check. 
3543 */ 3544 if (new) { 3545 ret = __bpf_arch_text_poke(poke->tailcall_target, 3546 BPF_MOD_JUMP, 3547 old_addr, new_addr); 3548 BUG_ON(ret < 0); 3549 if (!old) { 3550 ret = __bpf_arch_text_poke(poke->tailcall_bypass, 3551 BPF_MOD_JUMP, 3552 poke->bypass_addr, 3553 NULL); 3554 BUG_ON(ret < 0); 3555 } 3556 } else { 3557 ret = __bpf_arch_text_poke(poke->tailcall_bypass, 3558 BPF_MOD_JUMP, 3559 old_bypass_addr, 3560 poke->bypass_addr); 3561 BUG_ON(ret < 0); 3562 /* let other CPUs finish the execution of program 3563 * so that it will not be possible to expose them 3564 * to invalid nop, stack unwind, nop state 3565 */ 3566 if (!ret) 3567 synchronize_rcu(); 3568 ret = __bpf_arch_text_poke(poke->tailcall_target, 3569 BPF_MOD_JUMP, 3570 old_addr, NULL); 3571 BUG_ON(ret < 0); 3572 } 3573 } 3574 3575 bool bpf_jit_supports_arena(void) 3576 { 3577 return true; 3578 } 3579 3580 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) 3581 { 3582 if (!in_arena) 3583 return true; 3584 switch (insn->code) { 3585 case BPF_STX | BPF_ATOMIC | BPF_W: 3586 case BPF_STX | BPF_ATOMIC | BPF_DW: 3587 if (insn->imm == (BPF_AND | BPF_FETCH) || 3588 insn->imm == (BPF_OR | BPF_FETCH) || 3589 insn->imm == (BPF_XOR | BPF_FETCH)) 3590 return false; 3591 } 3592 return true; 3593 } 3594 3595 bool bpf_jit_supports_ptr_xchg(void) 3596 { 3597 return true; 3598 } 3599 3600 /* x86-64 JIT emits its own code to filter user addresses so return 0 here */ 3601 u64 bpf_arch_uaddress_limit(void) 3602 { 3603 return 0; 3604 } 3605
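/*
 * Illustration only, not part of this file's build: a stand-alone user-space
 * sketch of the address check emitted above for BPF_PROBE_MEM /
 * BPF_PROBE_MEMSX loads. One unsigned compare against "limit" covers both
 * bounds because subtracting VSYSCALL_ADDR wraps around for kernel
 * addresses. The SKETCH_* constants are the usual x86-64 4-level-paging
 * values and are assumptions of this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE     0x1000ULL
#define SKETCH_TASK_SIZE_MAX 0x00007ffffffff000ULL
#define SKETCH_VSYSCALL_ADDR 0xffffffffff600000ULL

/* Returns 1 when the JITed guard would attempt the load, 0 when it would
 * zero dst_reg and jump over the load instead.
 */
static int probe_mem_guard_allows(uint64_t src_reg, int32_t off)
{
	uint64_t addr  = src_reg + (uint64_t)(int64_t)off;
	uint64_t limit = SKETCH_TASK_SIZE_MAX + SKETCH_PAGE_SIZE - SKETCH_VSYSCALL_ADDR;

	/* addr - VSYSCALL_ADDR > limit (unsigned)  <=>
	 * TASK_SIZE_MAX + PAGE_SIZE < addr < VSYSCALL_ADDR
	 */
	return (addr - SKETCH_VSYSCALL_ADDR) > limit;
}

int main(void)
{
	printf("user address:   %d\n", probe_mem_guard_allows(0x00007f0000000000ULL, 0));
	printf("kernel address: %d\n", probe_mem_guard_allows(0xffff888000000000ULL, 0));
	printf("vsyscall page:  %d\n", probe_mem_guard_allows(SKETCH_VSYSCALL_ADDR, 8));
	return 0;
}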
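/*
 * Illustration only: a user-space sketch of the ex->fixup encoding built in
 * the LDX/populate_extable paths above. The low 8 bits hold the length of
 * the faulting x86 instruction and the upper bits hold the pt_regs offset
 * of the destination register (or a "don't clear" marker for the store
 * cases). fake_pt_regs, SKETCH_DONT_CLEAR and handle_fault() are stand-ins
 * modelling the behaviour described in the comments, not the kernel's
 * actual fixup handler.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pt_regs { uint64_t ax, bx, ip; };

#define SKETCH_DONT_CLEAR 1	/* no destination register to zero */

static uint32_t encode_fixup(uint32_t insn_len, uint32_t pt_regs_off)
{
	return insn_len | (pt_regs_off << 8);
}

static void handle_fault(struct fake_pt_regs *regs, uint32_t fixup)
{
	uint32_t reg_off = fixup >> 8;

	if (reg_off != SKETCH_DONT_CLEAR)
		*(uint64_t *)((char *)regs + reg_off) = 0;	/* zero dest reg */
	regs->ip += fixup & 0xff;				/* skip the insn */
}

int main(void)
{
	struct fake_pt_regs regs = { .ax = 0, .bx = 0xdead, .ip = 0x1000 };
	/* "mov rbx, qword ptr [rax+0x14]" is 4 bytes; rbx gets zero-inited */
	uint32_t fixup = encode_fixup(4, offsetof(struct fake_pt_regs, bx));

	handle_fault(&regs, fixup);
	printf("rbx=%#llx ip=%#llx\n", (unsigned long long)regs.bx,
	       (unsigned long long)regs.ip);
	return 0;
}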
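/*
 * Illustration only: a user-space model of the CMPXCHG retry loop emitted
 * above for BPF_{AND,OR,XOR} | BPF_FETCH, which cannot be a single x86
 * instruction. The GCC/Clang __atomic builtins stand in for the emitted
 * "lock cmpxchg"; the function returns the pre-modification value, as the
 * BPF instruction requires.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t atomic_xor_fetch_old(uint64_t *addr, uint64_t src)
{
	uint64_t old, new;

	do {
		old = __atomic_load_n(addr, __ATOMIC_RELAXED);	/* load old value */
		new = old ^ src;				/* op done locally */
		/* try to swap in the new value; retry if another writer won */
	} while (!__atomic_compare_exchange_n(addr, &old, new, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
	return old;
}

int main(void)
{
	uint64_t v = 0xf0f0;
	uint64_t old = atomic_xor_fetch_old(&v, 0x00ff);

	printf("old=%#llx new=%#llx\n",
	       (unsigned long long)old, (unsigned long long)v);
	return 0;
}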
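/*
 * Illustration only: a user-space walk-through of the register/stack split
 * that save_args() implements. Each argument takes (size + 7) / 8 slots and
 * is passed in registers only if all of its slots still fit in the six
 * integer argument registers; otherwise it goes on the caller's stack. The
 * sizes below reproduce the example from the save_args() comment,
 * int foo(char, char, char, char, char, struct foo_struct, char),
 * with struct foo_struct taking 16 bytes.
 */
#include <stdio.h>

int main(void)
{
	int arg_size[] = { 1, 1, 1, 1, 1, 16, 1 };
	int nr_args = 7, nr_regs = 0, nr_stack_slots = 0;

	for (int i = 0; i < nr_args; i++) {
		int arg_regs = (arg_size[i] + 7) / 8;

		if (nr_regs + arg_regs > 6) {
			printf("arg%d: %d stack slot(s)\n", i + 1, arg_regs);
			nr_stack_slots += arg_regs;
		} else {
			printf("arg%d: %d register(s)\n", i + 1, arg_regs);
			nr_regs += arg_regs;
		}
	}
	/* prints: arg1-arg5 and arg7 in registers, arg6 in two stack slots */
	printf("regs used: %d, stack slots: %d\n", nr_regs, nr_stack_slots);
	return 0;
}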
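/*
 * Illustration only: a quick check of the stack alignment fix-up used in
 * __arch_prepare_bpf_trampoline() when arguments must be passed on the
 * stack. Starting from an 8-byte aligned stack_size, adding 8 only when the
 * size is already 16-byte aligned guarantees that
 * stack_size + 8 (rbp) + 8 (rip) + 8 (origin rip) ends up 16-byte aligned.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (int stack_size = 0; stack_size <= 512; stack_size += 8) {
		int adjusted = stack_size + ((stack_size % 16) ? 0 : 8);

		assert((adjusted + 8 + 8 + 8) % 16 == 0);
	}
	printf("alignment rule holds for every 8-byte aligned stack_size\n");
	return 0;
}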
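/*
 * Illustration only: a host-side model of the control flow that
 * emit_bpf_dispatcher() lays out. The targets are sorted, then a balanced
 * binary search compares the incoming address against pivots; each leaf
 * compares against one known target ("je func") and otherwise falls through
 * to the indirect jump. Plain integers stand in for JITed image addresses.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int64_t dispatch(const int64_t *progs, int a, int b, int64_t func)
{
	if (a == b)					/* leaf node */
		return func == progs[a] ? progs[a] : -1;	/* -1: indirect jmp */

	int pivot = (b - a) / 2;
	if (func > progs[a + pivot])			/* jg upper_part */
		return dispatch(progs, a + pivot + 1, b, func);
	return dispatch(progs, a, a + pivot, func);	/* lower_part */
}

static int cmp_ips(const void *a, const void *b)
{
	const int64_t *ipa = a, *ipb = b;

	return (*ipa > *ipb) - (*ipa < *ipb);
}

int main(void)
{
	int64_t progs[] = { 0x5000, 0x1000, 0x3000, 0x4000, 0x2000 };
	int num_funcs = 5;

	qsort(progs, num_funcs, sizeof(progs[0]), cmp_ips);
	printf("%#llx\n", (unsigned long long)dispatch(progs, 0, num_funcs - 1, 0x3000));
	printf("%lld\n", (long long)dispatch(progs, 0, num_funcs - 1, 0x3500));
	return 0;
}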
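/*
 * Illustration only: a toy model of the pass loop in bpf_int_jit_compile().
 * Instruction sizes depend on branch offsets and offsets depend on sizes,
 * so code generation is repeated until the program length stops changing.
 * Here every "instruction" is a forward jump to the end of the program,
 * taking 2 bytes when the offset fits in imm8 and 5 bytes otherwise; the
 * initial addrs[] estimate mirrors the kernel's 64 bytes per instruction.
 */
#include <stdio.h>

#define NR_INSNS 64

static int do_pass(int *addrs)
{
	int proglen = 0;

	for (int i = 0; i < NR_INSNS; i++) {
		int off = addrs[NR_INSNS] - addrs[i + 1];	/* to end of prog */
		int ilen = (off >= -128 && off <= 127) ? 2 : 5;

		proglen += ilen;
		addrs[i + 1] = proglen;
	}
	return proglen;
}

int main(void)
{
	int addrs[NR_INSNS + 1];
	int proglen, oldproglen = 0;

	for (int i = 0; i <= NR_INSNS; i++)
		addrs[i] = i * 64;

	for (int pass = 0; pass < 20; pass++) {
		proglen = do_pass(addrs);
		printf("pass %d: proglen=%d\n", pass, proglen);
		if (proglen == oldproglen)
			break;
		oldproglen = proglen;
	}
	return 0;
}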