// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/unwind.h>
#include <asm/cfi.h>

static bool all_callee_regs_used[4] = {true, true, true, true};

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()		EMIT(gen_endbr(), 4)
#define EMIT_ENDBR_POISON()	EMIT(gen_endbr_poison(), 4)
#else
#define EMIT_ENDBR()
#define EMIT_ENDBR_POISON()
#endif

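/*
 * For illustration: the EMIT*() helpers above append little-endian bytes
 * at 'prog'. E.g. the 'sub rsp, rounded_stack_depth' in emit_prologue()
 * below, for a 64-byte stack, is
 *
 *	EMIT3_off32(0x48, 0x81, 0xEC, 64);	// 48 81 EC 40 00 00 00
 *
 * i.e. REX.W, opcode 0x81 /5, ModRM 0xEC (mod=11, reg=/5, rm=rsp),
 * followed by the 4-byte immediate.
 */
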
static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

/*
 * Let us limit the positive offset to be <= 123.
 * This is to ensure eventual jit convergence for the following patterns:
 * ...
 * pass4, final_proglen=4391:
 *   ...
 *   20e:	48 85 ff		test   rdi,rdi
 *   211:	74 7d			je     0x290
 *   213:	48 8b 77 00		mov    rsi,QWORD PTR [rdi+0x0]
 *   ...
 *   289:	48 85 ff		test   rdi,rdi
 *   28c:	74 17			je     0x2a5
 *   28e:	e9 7f ff ff ff		jmp    0x212
 *   293:	bf 03 00 00 00		mov    edi,0x3
 * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (125)
 * and insn at 0x28e is 5-byte jmp insn with offset -129.
 *
 * pass5, final_proglen=4392:
 *   ...
 *   20e:	48 85 ff		test   rdi,rdi
 *   211:	0f 84 80 00 00 00	je     0x297
 *   217:	48 8b 77 00		mov    rsi,QWORD PTR [rdi+0x0]
 *   ...
 *   28d:	48 85 ff		test   rdi,rdi
 *   290:	74 1a			je     0x2ac
 *   292:	eb 84			jmp    0x218
 *   294:	bf 03 00 00 00		mov    edi,0x3
 * Note that insn at 0x211 is 6-byte cond jump insn now since its offset
 * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
 * At the same time, insn at 0x292 is a 2-byte insn since its offset is
 * -124.
 *
 * pass6 will repeat the same code as in pass4 and this will prevent
 * eventual convergence.
 *
 * To fix this issue, we need to break the je (2->6 bytes) <-> jmp
 * (5->2 bytes) cycle in the above. In the above example a je offset
 * <= 0x7c should work.
 *
 * For other cases, je <-> je needs offset <= 0x7b to avoid the
 * no-convergence issue, and for jmp <-> je and jmp <-> jmp cases, a jmp
 * offset <= 0x7c avoids it.
 *
 * Overall, let us limit the positive offset for 8bit cond/uncond jmp
 * insn to maximum 123 (0x7b). This way, the jit pass can eventually
 * converge.
 */
static bool is_imm8_jmp_offset(int value)
{
	return value <= 123 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
#define X86_REG_R12 (MAX_BPF_JIT_REG + 3)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is not mapped to any BPF register, since when used
 * as a base address register in load/store instructions it always needs
 * an extra byte of encoding and is callee saved; the JIT instead reserves
 * it for the arena base address (see do_jit() below).
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
	[X86_REG_R12] = 4, /* R12 callee saved */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
 * which need an extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding.
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(X86_REG_R12) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need an extra
 * byte of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(index))
		byte |= 2;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

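/*
 * For illustration: EMIT_mov(BPF_REG_6, BPF_REG_7) expands to
 *
 *	EMIT3(add_2mod(0x48, BPF_REG_6, BPF_REG_7), 0x89,
 *	      add_2reg(0xC0, BPF_REG_6, BPF_REG_7));
 *
 * add_2mod() sets REX.R for r13 (0x48 | 4 = 0x4C) and add_2reg() packs
 * reg2hex[BPF_REG_6] = 3 (rbx) and reg2hex[BPF_REG_7] = 5 (r13) into the
 * ModRM byte (0xC0 + 3 + (5 << 3) = 0xEB), giving '4C 89 EB',
 * i.e. 'mov rbx, r13'.
 */
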
/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(12 + ENDBR_INSN_SIZE)

static void push_r9(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x51);   /* push r9 */
	*pprog = prog;
}

static void pop_r9(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x59);   /* pop r9 */
	*pprog = prog;
}

static void push_r12(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x54);   /* push r12 */
	*pprog = prog;
}

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_r12(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x5C);   /* pop r12 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

/*
 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
 * in arch/x86/kernel/alternative.c
 */

static void emit_fineibt(u8 **pprog, u32 hash)
{
	u8 *prog = *pprog;

	EMIT_ENDBR();
	EMIT3_off32(0x41, 0x81, 0xea, hash);	/* subl $hash, %r10d	*/
	EMIT2(0x74, 0x07);			/* jz.d8 +7		*/
	EMIT2(0x0f, 0x0b);			/* ud2			*/
	EMIT1(0x90);				/* nop			*/
	EMIT_ENDBR_POISON();

	*pprog = prog;
}

static void emit_kcfi(u8 **pprog, u32 hash)
{
	u8 *prog = *pprog;

	EMIT1_off32(0xb8, hash);		/* movl $hash, %eax	*/
#ifdef CONFIG_CALL_PADDING
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
#endif
	EMIT_ENDBR();

	*pprog = prog;
}

static void emit_cfi(u8 **pprog, u32 hash)
{
	u8 *prog = *pprog;

	switch (cfi_mode) {
	case CFI_FINEIBT:
		emit_fineibt(&prog, hash);
		break;

	case CFI_KCFI:
		emit_kcfi(&prog, hash);
		break;

	default:
		EMIT_ENDBR();
		break;
	}

	*pprog = prog;
}

static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
{
	u8 *prog = *pprog;

	if (!is_subprog) {
		/* cmp rax, MAX_TAIL_CALL_CNT */
		EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
		EMIT2(X86_JA, 6);        /* ja 6 */
		/* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
		 * case1: entry of main prog.
		 * case2: tail callee of main prog.
		 */
		EMIT1(0x50);             /* push rax */
		/* Make rax as tail_call_cnt_ptr. */
		EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
		EMIT2(0xEB, 1);          /* jmp 1 */
		/* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
		 * case: tail callee of subprog.
		 */
		EMIT1(0x50);             /* push rax */
		/* push tail_call_cnt_ptr */
		EMIT1(0x50);             /* push rax */
	} else { /* is_subprog */
		/* rax is tail_call_cnt_ptr. */
		EMIT1(0x50);             /* push rax */
		/* push tail_call_cnt_ptr */
		EMIT1(0x50);             /* push rax */
	}

	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog,
			  bool is_exception_cb)
{
	u8 *prog = *pprog;

	emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	emit_nops(&prog, X86_PATCH_SIZE);
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			/* When it's the entry of the whole tailcall context,
			 * zeroing rax means initialising tail_call_cnt.
			 */
			EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
		else
			/* Keep the same instruction layout. */
			emit_nops(&prog, 3);     /* nop3 */
	}
	/* Exception callback receives FP as third parameter */
	if (is_exception_cb) {
		EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
		EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
		/* The main frame must have exception_boundary as true, so we
		 * first restore those callee-saved regs from stack, before
		 * reusing the stack frame.
		 */
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
		/* Reset the stack frame. */
		EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
	} else {
		EMIT1(0x55);             /* push rbp */
		EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	}

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		emit_prologue_tail_call(&prog, is_subprog);
	*pprog = prog;
}

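/*
 * A sketch of the resulting prologue for a tail-call-reachable main prog
 * (assuming no CFI/IBT and stack_depth = 8):
 *
 *	nop5			; patch site for BPF trampoline
 *	xor rax, rax		; tail_call_cnt = 0
 *	push rbp
 *	mov rbp, rsp		; X86_TAIL_CALL_OFFSET (12) bytes so far;
 *				; an endbr64 would sit here with IBT on
 *	sub rsp, 8
 *	...			; emit_prologue_tail_call() from above
 */
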
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
	OPTIMIZER_HIDE_VAR(func);
	ip += x86_call_depth_emit_accounting(pprog, func, ip);
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
		else
			emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else {
		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
			EMIT1(0xCC);		/* int3 */
	}

	*pprog = prog;
}

static void emit_return(u8 **pprog, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		emit_jump(&prog, x86_return_thunk, ip);
	} else {
		EMIT1(0xC3);		/* ret */
		if (IS_ENABLED(CONFIG_MITIGATION_SLS))
			EMIT1(0xCC);	/* int3 */
	}

	*pprog = prog;
}

#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)	(-16 - round_up(stack, 8))

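/*
 * A sketch of the stack frame this offset indexes into, as laid out by
 * emit_prologue() and emit_prologue_tail_call() above:
 *
 *	[rbp + 0]				saved rbp
 *	[rbp - round_up(stack_depth, 8)]	bottom of BPF stack
 *	[rbp - round_up(stack_depth, 8) - 8]	tail_call_cnt (main prog) or
 *						copy of tail_call_cnt_ptr (subprog)
 *	[rbp - round_up(stack_depth, 8) - 16]	tail_call_cnt_ptr  <- this offset
 */
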
/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
					u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	/* Inc tail_call_cnt if the slot is populated. */
	EMIT4(0x48, 0x83, 0x00, 0x01);            /* add qword ptr [rax], 1 */

	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
			pop_r12(&prog);
	}

	/* Pop tail_call_cnt_ptr. */
	EMIT1(0x58);                              /* pop rax */
	/* Pop tail_call_cnt, if it's main prog.
	 * Pop tail_call_cnt_ptr, if it's subprog.
	 */
	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
				      struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                     /* jae out */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	/* Inc tail_call_cnt if the slot is populated. */
	EMIT4(0x48, 0x83, 0x00, 0x01);              /* add qword ptr [rax], 1 */

	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
			pop_r12(&prog);
	}

	/* Pop tail_call_cnt_ptr. */
	EMIT1(0x58);                                /* pop rax */
	/* Pop tail_call_cnt, if it's main prog.
	 * Pop tail_call_cnt_ptr, if it's subprog.
	 */
	EMIT1(0x58);                                /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	emit_nops(&prog, X86_PATCH_SIZE);

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

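/*
 * Note on the patch sites above: emit_jump() emits the bypass at
 * poke->tailcall_bypass that skips over the not-yet-patched tail call,
 * and emit_nops() leaves a nop5 at poke->tailcall_target.
 * bpf_tail_call_direct_fixup() below first turns that nop5 into
 * 'jmp target->bpf_func + poke->adj_off' and only then replaces the
 * bypass jump with a nop5, making the tail call live.
 */
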
static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
	u8 *prog = *pprog;

	if (is_uimm32(imm64)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else if (is_simm32(imm64)) {
		emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
	} else {
		/* movabsq rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

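/*
 * For illustration, with dst_reg == BPF_REG_0 (rax) the three paths of
 * emit_mov_imm64() above produce:
 *
 *	0x00000000ffffffff -> mov eax, 0xffffffff	(5 bytes, zero ext)
 *	0xffffffffffffffff -> mov rax, -1		(7 bytes, sign ext)
 *	0x0000123400005678 -> movabsq rax, imm64	(10 bytes)
 */
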
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
			   u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* movs[b,w,l]q dst, src */
		if (num_bits == 8)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 16)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 32)
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
			      add_2reg(0xC0, src_reg, dst_reg));
	} else {
		/* movs[b,w]l dst, src */
		if (num_bits == 8) {
			EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		} else if (num_bits == 16) {
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, src_reg, dst_reg));
			EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		}
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * the special case of x86 R13, which always needs an offset,
		 * is not worth the hassle.
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}

static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
	} else {
		EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
	}
	*pprog = prog;
}

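/*
 * For illustration: emit_ldx_r12(&prog, BPF_W, BPF_REG_0, BPF_REG_1, 0)
 * below emits '42 8B 44 27 00', i.e. 'mov eax, dword ptr [rdi + r12]':
 * REX.X (0x42) selects r12 as index, ModRM 0x44 selects SIB plus disp8,
 * and the SIB byte 0x27 encodes base = rdi, index = r12, scale = 1.
 */
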
/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

/* LDSX: dst_reg = *(s8*)(src_reg + off) */
static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movsx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
		break;
	case BPF_H:
		/* Emit 'movsx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
		break;
	case BPF_W:
		/* Emit 'movsx rax, dword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* movzx rax, byte ptr [rax + r12 + off] */
		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* movzx rax, word ptr [rax + r12 + off] */
		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* mov eax, dword ptr [rax + r12 + off] */
		EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
		break;
	case BPF_DW:
		/* mov rax, qword ptr [rax + r12 + off] */
		EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
		break;
	}
	emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
	*pprog = prog;
}

static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
}

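/*
 * For illustration: emit_ldsx(&prog, BPF_B, BPF_REG_0, BPF_REG_1, 0)
 * above emits '48 0F BE 47 00', i.e. 'movsx rax, byte ptr [rdi + 0]':
 * REX.W for the 64-bit destination, 0F BE for a sign-extending byte
 * load, then the ModRM/disp8 suffix from emit_insn_suffix().
 */
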
/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* mov byte ptr [rax + r12 + off], al */
		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
		break;
	case BPF_H:
		/* mov word ptr [rax + r12 + off], ax */
		EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
		break;
	case BPF_W:
		/* mov dword ptr [rax + r12 + off], eax */
		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
		break;
	case BPF_DW:
		/* mov qword ptr [rax + r12 + off], rax */
		EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
		break;
	}
	emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
	*pprog = prog;
}

static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
}

/* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* mov byte ptr [rax + r12 + off], imm8 */
		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
		break;
	case BPF_H:
		/* mov word ptr [rax + r12 + off], imm16 */
		EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
		break;
	case BPF_W:
		/* mov dword ptr [rax + r12 + off], imm32 */
		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
		break;
	case BPF_DW:
		/* mov qword ptr [rax + r12 + off], imm32 */
		EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
		break;
	}
	emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
	EMIT(imm, bpf_size_to_x86_bytes(size));
	*pprog = prog;
}

static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
{
	emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
}

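/*
 * For illustration: emit_atomic() below turns BPF_ADD | BPF_FETCH on
 * BPF_DW into 'lock xadd'. With dst_reg = BPF_REG_1 (rdi), src_reg =
 * BPF_REG_2 (rsi) and off = 0 it emits 'F0 48 0F C1 77 00', i.e.
 * 'lock xadd qword ptr [rdi + 0], rsi'.
 */
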
static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}

static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
			     u32 dst_reg, u32 src_reg, u32 index_reg, int off)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */
	switch (size) {
	case BPF_W:
		EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
		break;
	case BPF_DW:
		EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
		break;
	default:
		pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
		return -EFAULT;
	}

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}
	emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
	*pprog = prog;
	return 0;
}

#define DONT_CLEAR 1

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	if (reg != DONT_CLEAR)
		*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}

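/*
 * For illustration: for the 5-byte 'mov eax, dword ptr [rdi + r12]'
 * arena load shown earlier, the JIT records
 * fixup = 5 | (offsetof(struct pt_regs, ax) << 8), so on a fault
 * ex_handler_bpf() above zeroes regs->ax and advances regs->ip past the
 * load. Stores use DONT_CLEAR, so no register is zeroed for them.
 */
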
static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

/* emit the 3-byte VEX prefix
 *
 * r: same as rex.r, extra bit for ModRM reg field
 * x: same as rex.x, extra bit for SIB index field
 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
 * m: opcode map select, encoding escape bytes e.g. 0x0f38
 * w: same as rex.w (32 bit or 64 bit) or opcode specific
 * src_reg2: additional source reg (encoded as BPF reg)
 * l: vector length (128 bit or 256 bit) or reserved
 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
 */
static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
		      bool w, u8 src_reg2, bool l, u8 pp)
{
	u8 *prog = *pprog;
	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
	u8 b1, b2;
	u8 vvvv = reg2hex[src_reg2];

	/* reg2hex gives only the lower 3 bit of vvvv */
	if (is_ereg(src_reg2))
		vvvv |= 1 << 3;

	/*
	 * 2nd byte of 3-byte VEX prefix
	 * ~ means bit inverted encoding
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  |~R |~X |~B |         m         |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
	/*
	 * 3rd byte of 3-byte VEX prefix
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  | W |     ~vvvv     | L |   pp  |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);

	EMIT3(b0, b1, b2);
	*pprog = prog;
}

/* emit BMI2 shift instruction */
static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
{
	u8 *prog = *pprog;
	bool r = is_ereg(dst_reg);
	u8 m = 2; /* escape code 0f38 */

	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
	*pprog = prog;
}

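/*
 * For illustration: emit_shiftx(&prog, BPF_REG_0, BPF_REG_6, true, 1)
 * above encodes 'shlx rax, rax, rbx' as 'C4 E2 E1 F7 C0': a 3-byte VEX
 * prefix (map 0f38, W=1 for 64 bit, ~vvvv selecting rbx as the shift
 * count, pp=1 for the implied 0x66 prefix), then opcode 0xF7 and
 * ModRM 0xC0 with dst_reg in both the reg and r/m fields.
 */
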
static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr)
{
	u8 *prog = *pprog;

	/* movabs r9, priv_frame_ptr */
	emit_mov_imm64(&prog, X86_REG_R9, (__force long) priv_frame_ptr >> 32,
		       (u32) (__force long) priv_frame_ptr);

#ifdef CONFIG_SMP
	/* add <r9>, gs:[<off>] */
	EMIT2(0x65, 0x4c);
	EMIT3(0x03, 0x0c, 0x25);
	EMIT((u32)(unsigned long)&this_cpu_off, 4);
#endif

	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

#define __LOAD_TCC_PTR(off)			\
	EMIT3_off32(0x48, 0x8B, 0x85, off)
/* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
#define LOAD_TAIL_CALL_CNT_PTR(stack)				\
	__LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))

/* Memory size/value to protect private stack overflow/underflow */
#define PRIV_STACK_GUARD_SZ    8
#define PRIV_STACK_GUARD_VAL   0xEB9F12345678eb9fULL

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	void __percpu *priv_frame_ptr = NULL;
	u64 arena_vm_start, user_vm_start;
	void __percpu *priv_stack_ptr;
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	u32 stack_depth;
	int err;

	stack_depth = bpf_prog->aux->stack_depth;
	priv_stack_ptr = bpf_prog->aux->priv_stack_ptr;
	if (priv_stack_ptr) {
		priv_frame_ptr = priv_stack_ptr + PRIV_STACK_GUARD_SZ +
				 round_up(stack_depth, 8);
		stack_depth = 0;
	}

	arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
	user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);

	detect_reg_usage(insn, insn_cnt, callee_regs_used);

	emit_prologue(&prog, stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
	/* Exception callback will clobber callee regs for its own use, and
	 * restore the original callee regs from main prog's stack frame.
	 */
	if (bpf_prog->aux->exception_boundary) {
		/* We also need to save r12, which is not mapped to any BPF
		 * register, as we throw after entry into the kernel, which may
		 * overwrite r12.
		 */
		push_r12(&prog);
		push_callee_regs(&prog, all_callee_regs_used);
	} else {
		if (arena_vm_start)
			push_r12(&prog);
		push_callee_regs(&prog, callee_regs_used);
	}
	if (arena_vm_start)
		emit_mov_imm64(&prog, X86_REG_R12,
			       arena_vm_start >> 32, (u32) arena_vm_start);

	if (priv_frame_ptr)
		emit_priv_frame_ptr(&prog, priv_frame_ptr);

	ilen = prog - temp;
	if (rw_image)
		memcpy(rw_image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		s16 insn_off;
		u8 jmp_cond;
		u8 *func;
		int nops;

		if (priv_frame_ptr) {
			if (src_reg == BPF_REG_FP)
				src_reg = X86_REG_R9;

			if (dst_reg == BPF_REG_FP)
				dst_reg = X86_REG_R9;
		}

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
			if (insn_is_cast_user(insn)) {
				if (dst_reg != src_reg)
					/* 32-bit mov */
					emit_mov_reg(&prog, false, dst_reg, src_reg);
				/* shl dst_reg, 32 */
				maybe_emit_1mod(&prog, dst_reg, true);
				EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);

				/* or dst_reg, user_vm_start */
				maybe_emit_1mod(&prog, dst_reg, true);
				if (is_axreg(dst_reg))
					EMIT1_off32(0x0D, user_vm_start >> 32);
				else
					EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32);

				/* rol dst_reg, 32 */
				maybe_emit_1mod(&prog, dst_reg, true);
				EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);

				/* xor r11, r11 */
				EMIT3(0x4D, 0x31, 0xDB);

				/* test dst_reg32, dst_reg32; check if lower 32-bit are zero */
				maybe_emit_mod(&prog, dst_reg, dst_reg, false);
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));

				/* cmove r11, dst_reg; if so, set dst_reg to zero */
				/* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
				maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
				EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
				break;
			} else if (insn_is_mov_percpu_addr(insn)) {
				/* mov <dst>, <src> (if necessary) */
				EMIT_mov(dst_reg, src_reg);
#ifdef CONFIG_SMP
				/* add <dst>, gs:[<off>] */
				EMIT2(0x65, add_1mod(0x48, dst_reg));
				EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
				EMIT((u32)(unsigned long)&this_cpu_off, 4);
#endif
				break;
			}
			fallthrough;
		case BPF_ALU | BPF_MOV | BPF_X:
			if (insn->off == 0)
				emit_mov_reg(&prog,
					     BPF_CLASS(insn->code) == BPF_ALU64,
					     dst_reg, src_reg);
			else
				emit_movsx_reg(&prog, insn->off,
					       BPF_CLASS(insn->code) == BPF_ALU64,
					       dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K: {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X) {
				if (src_reg == BPF_REG_0 ||
				    src_reg == BPF_REG_3) {
					/* mov r11, src_reg */
					EMIT_mov(AUX_REG, src_reg);
					src_reg = AUX_REG;
				}
			} else {
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
				src_reg = AUX_REG;
			}

			if (dst_reg != BPF_REG_0)
				/* mov rax, dst_reg */
				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);

			if (insn->off == 0) {
				/*
				 * xor edx, edx
				 * equivalent to 'xor rdx, rdx', but one byte less
				 */
				EMIT2(0x31, 0xd2);

				/* div src_reg */
				maybe_emit_1mod(&prog, src_reg, is64);
				EMIT2(0xF7, add_1reg(0xF0, src_reg));
			} else {
				if (BPF_CLASS(insn->code) == BPF_ALU)
					EMIT1(0x99); /* cdq */
				else
					EMIT2(0x48, 0x99); /* cqo */

				/* idiv src_reg */
				maybe_emit_1mod(&prog, src_reg, is64);
				EMIT2(0xF7, add_1reg(0xF8, src_reg));
			}

			if (BPF_OP(insn->code) == BPF_MOD &&
			    dst_reg != BPF_REG_3)
				/* mov dst_reg, rdx */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
			else if (BPF_OP(insn->code) == BPF_DIV &&
				 dst_reg != BPF_REG_0)
				/* mov dst_reg, rax */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0)
				EMIT1(0x58); /* pop rax */
			break;
		}

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_K:
			maybe_emit_mod(&prog, dst_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			if (is_imm8(imm32))
				/* imul dst_reg, dst_reg, imm8 */
				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
				      imm32);
			else
				/* imul dst_reg, dst_reg, imm32 */
				EMIT2_off32(0x69,
					    add_2reg(0xC0, dst_reg, dst_reg),
					    imm32);
			break;

		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			maybe_emit_mod(&prog, src_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			/* imul dst_reg, src_reg */
			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
			break;

			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:
			/* BMI2 shifts aren't better when shift count is already in rcx */
			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
				u8 op;

				switch (BPF_OP(insn->code)) {
				case BPF_LSH:
					op = 1; /* prefix 0x66 */
					break;
				case BPF_RSH:
					op = 3; /* prefix 0xf2 */
					break;
				case BPF_ARSH:
					op = 2; /* prefix 0xf3 */
					break;
				}

				emit_shiftx(&prog, dst_reg, src_reg, w, op);

				break;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				/* Check for bad case when dst_reg == rcx */
				if (dst_reg == BPF_REG_4) {
					/* mov r11, dst_reg */
					EMIT_mov(AUX_REG, dst_reg);
					dst_reg = AUX_REG;
				} else {
					EMIT1(0x51); /* push rcx */
				}
				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4) {
				if (insn->dst_reg == BPF_REG_4)
					/* mov dst_reg, r11 */
					EMIT_mov(insn->dst_reg, AUX_REG);
				else
					EMIT1(0x59); /* pop rcx */
			}

			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* speculation barrier */
		case BPF_ST | BPF_NOSPEC:
			EMIT_LFENCE();
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

		case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
		case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
		case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
		case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
			start_of_ldx = prog;
			emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
			goto populate_extable;

			/* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
		case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
		case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
		case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
		case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
		case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
			start_of_ldx = prog;
			if (BPF_CLASS(insn->code) == BPF_LDX)
				emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			else
				emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
populate_extable:
			{
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen + (start_of_ldx - temp);
				s64 delta;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("mem32 extable bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				/* switch ex to rw buffer for writes */
				ex = (void *)rw_image + ((void *)ex - (void *)image);

				ex->insn = delta;

				ex->data = EX_TYPE_BPF;

				ex->fixup = (prog - start_of_ldx) |
					((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8);
			}
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/* LDXS: dst_reg = *(s8*)(src_reg + off) */
		case BPF_LDX | BPF_MEMSX | BPF_B:
		case BPF_LDX | BPF_MEMSX | BPF_H:
		case BPF_LDX | BPF_MEMSX | BPF_W:
		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
			insn_off = insn->off;

			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
				/* Conservatively check that src_reg + insn->off is a kernel address:
				 *   src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
				 *   and
				 *   src_reg + insn->off < VSYSCALL_ADDR
				 */

				u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
				u8 *end_of_jmp;

				/* movabsq r10, VSYSCALL_ADDR */
				emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
					       (u32)(long)VSYSCALL_ADDR);

				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);

				if (insn->off) {
					/* add r11, insn->off */
					maybe_emit_1mod(&prog, AUX_REG, true);
					EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
				}

				/* sub r11, r10 */
				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
				EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));

				/* movabsq r10, limit */
				emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
					       (u32)(long)limit);

				/* cmp r11, r10 */
				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
				EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));

				/* if unsigned '>', goto load */
				EMIT2(X86_JA, 0);
				end_of_jmp = prog;

				/* xor dst_reg, dst_reg */
				emit_mov_imm32(&prog, false, dst_reg, 0);
				/* jmp byte_after_ldx */
				EMIT2(0xEB, 0);

				/* populate jmp_offset for the JA above to jump to start_of_ldx */
				start_of_ldx = prog;
				end_of_jmp[-1] = start_of_ldx - end_of_jmp;
			}
			if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
			    BPF_MODE(insn->code) == BPF_MEMSX)
				emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
			else
				emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen + (start_of_ldx - temp);
				s64 delta;

				/* populate jmp_offset for the JMP above */
start_of_ldx[-1] = prog - start_of_ldx; 2078 2079 if (!bpf_prog->aux->extable) 2080 break; 2081 2082 if (excnt >= bpf_prog->aux->num_exentries) { 2083 pr_err("ex gen bug\n"); 2084 return -EFAULT; 2085 } 2086 ex = &bpf_prog->aux->extable[excnt++]; 2087 2088 delta = _insn - (u8 *)&ex->insn; 2089 if (!is_simm32(delta)) { 2090 pr_err("extable->insn doesn't fit into 32-bit\n"); 2091 return -EFAULT; 2092 } 2093 /* switch ex to rw buffer for writes */ 2094 ex = (void *)rw_image + ((void *)ex - (void *)image); 2095 2096 ex->insn = delta; 2097 2098 ex->data = EX_TYPE_BPF; 2099 2100 if (dst_reg > BPF_REG_9) { 2101 pr_err("verifier error\n"); 2102 return -EFAULT; 2103 } 2104 /* 2105 * Compute size of x86 insn and its target dest x86 register. 2106 * ex_handler_bpf() will use lower 8 bits to adjust 2107 * pt_regs->ip to jump over this x86 instruction 2108 * and upper bits to figure out which pt_regs to zero out. 2109 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" 2110 * of 4 bytes will be ignored and rbx will be zero inited. 2111 */ 2112 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); 2113 } 2114 break; 2115 2116 case BPF_STX | BPF_ATOMIC | BPF_W: 2117 case BPF_STX | BPF_ATOMIC | BPF_DW: 2118 if (insn->imm == (BPF_AND | BPF_FETCH) || 2119 insn->imm == (BPF_OR | BPF_FETCH) || 2120 insn->imm == (BPF_XOR | BPF_FETCH)) { 2121 bool is64 = BPF_SIZE(insn->code) == BPF_DW; 2122 u32 real_src_reg = src_reg; 2123 u32 real_dst_reg = dst_reg; 2124 u8 *branch_target; 2125 2126 /* 2127 * Can't be implemented with a single x86 insn. 2128 * Need to do a CMPXCHG loop. 2129 */ 2130 2131 /* Will need RAX as a CMPXCHG operand so save R0 */ 2132 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 2133 if (src_reg == BPF_REG_0) 2134 real_src_reg = BPF_REG_AX; 2135 if (dst_reg == BPF_REG_0) 2136 real_dst_reg = BPF_REG_AX; 2137 2138 branch_target = prog; 2139 /* Load old value */ 2140 emit_ldx(&prog, BPF_SIZE(insn->code), 2141 BPF_REG_0, real_dst_reg, insn->off); 2142 /* 2143 * Perform the (commutative) operation locally, 2144 * put the result in the AUX_REG. 2145 */ 2146 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); 2147 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); 2148 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], 2149 add_2reg(0xC0, AUX_REG, real_src_reg)); 2150 /* Attempt to swap in new value */ 2151 err = emit_atomic(&prog, BPF_CMPXCHG, 2152 real_dst_reg, AUX_REG, 2153 insn->off, 2154 BPF_SIZE(insn->code)); 2155 if (WARN_ON(err)) 2156 return err; 2157 /* 2158 * ZF tells us whether we won the race. If it's 2159 * cleared we need to try again. 
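* For example, for a 64-bit BPF_OR | BPF_FETCH the loop above works as
* follows: rax holds the value loaded from [real_dst_reg + off], r11 is
* set to rax | real_src_reg, and 'lock cmpxchg [real_dst_reg + off], r11'
* stores r11 (setting ZF) only if memory still equals rax; otherwise rax
* receives the current value, ZF is cleared, and the jne below restarts
* the loop at the load.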
2160 */ 2161 EMIT2(X86_JNE, -(prog - branch_target) - 2); 2162 /* Return the pre-modification value */ 2163 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); 2164 /* Restore R0 after clobbering RAX */ 2165 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 2166 break; 2167 } 2168 2169 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, 2170 insn->off, BPF_SIZE(insn->code)); 2171 if (err) 2172 return err; 2173 break; 2174 2175 case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: 2176 case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: 2177 start_of_ldx = prog; 2178 err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code), 2179 dst_reg, src_reg, X86_REG_R12, insn->off); 2180 if (err) 2181 return err; 2182 goto populate_extable; 2183 2184 /* call */ 2185 case BPF_JMP | BPF_CALL: { 2186 u8 *ip = image + addrs[i - 1]; 2187 2188 func = (u8 *) __bpf_call_base + imm32; 2189 if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) { 2190 LOAD_TAIL_CALL_CNT_PTR(stack_depth); 2191 ip += 7; 2192 } 2193 if (!imm32) 2194 return -EINVAL; 2195 if (priv_frame_ptr) { 2196 push_r9(&prog); 2197 ip += 2; 2198 } 2199 ip += x86_call_depth_emit_accounting(&prog, func, ip); 2200 if (emit_call(&prog, func, ip)) 2201 return -EINVAL; 2202 if (priv_frame_ptr) 2203 pop_r9(&prog); 2204 break; 2205 } 2206 2207 case BPF_JMP | BPF_TAIL_CALL: 2208 if (imm32) 2209 emit_bpf_tail_call_direct(bpf_prog, 2210 &bpf_prog->aux->poke_tab[imm32 - 1], 2211 &prog, image + addrs[i - 1], 2212 callee_regs_used, 2213 stack_depth, 2214 ctx); 2215 else 2216 emit_bpf_tail_call_indirect(bpf_prog, 2217 &prog, 2218 callee_regs_used, 2219 stack_depth, 2220 image + addrs[i - 1], 2221 ctx); 2222 break; 2223 2224 /* cond jump */ 2225 case BPF_JMP | BPF_JEQ | BPF_X: 2226 case BPF_JMP | BPF_JNE | BPF_X: 2227 case BPF_JMP | BPF_JGT | BPF_X: 2228 case BPF_JMP | BPF_JLT | BPF_X: 2229 case BPF_JMP | BPF_JGE | BPF_X: 2230 case BPF_JMP | BPF_JLE | BPF_X: 2231 case BPF_JMP | BPF_JSGT | BPF_X: 2232 case BPF_JMP | BPF_JSLT | BPF_X: 2233 case BPF_JMP | BPF_JSGE | BPF_X: 2234 case BPF_JMP | BPF_JSLE | BPF_X: 2235 case BPF_JMP32 | BPF_JEQ | BPF_X: 2236 case BPF_JMP32 | BPF_JNE | BPF_X: 2237 case BPF_JMP32 | BPF_JGT | BPF_X: 2238 case BPF_JMP32 | BPF_JLT | BPF_X: 2239 case BPF_JMP32 | BPF_JGE | BPF_X: 2240 case BPF_JMP32 | BPF_JLE | BPF_X: 2241 case BPF_JMP32 | BPF_JSGT | BPF_X: 2242 case BPF_JMP32 | BPF_JSLT | BPF_X: 2243 case BPF_JMP32 | BPF_JSGE | BPF_X: 2244 case BPF_JMP32 | BPF_JSLE | BPF_X: 2245 /* cmp dst_reg, src_reg */ 2246 maybe_emit_mod(&prog, dst_reg, src_reg, 2247 BPF_CLASS(insn->code) == BPF_JMP); 2248 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); 2249 goto emit_cond_jmp; 2250 2251 case BPF_JMP | BPF_JSET | BPF_X: 2252 case BPF_JMP32 | BPF_JSET | BPF_X: 2253 /* test dst_reg, src_reg */ 2254 maybe_emit_mod(&prog, dst_reg, src_reg, 2255 BPF_CLASS(insn->code) == BPF_JMP); 2256 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); 2257 goto emit_cond_jmp; 2258 2259 case BPF_JMP | BPF_JSET | BPF_K: 2260 case BPF_JMP32 | BPF_JSET | BPF_K: 2261 /* test dst_reg, imm32 */ 2262 maybe_emit_1mod(&prog, dst_reg, 2263 BPF_CLASS(insn->code) == BPF_JMP); 2264 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 2265 goto emit_cond_jmp; 2266 2267 case BPF_JMP | BPF_JEQ | BPF_K: 2268 case BPF_JMP | BPF_JNE | BPF_K: 2269 case BPF_JMP | BPF_JGT | BPF_K: 2270 case BPF_JMP | BPF_JLT | BPF_K: 2271 case BPF_JMP | BPF_JGE | BPF_K: 2272 case BPF_JMP | BPF_JLE | BPF_K: 2273 case BPF_JMP | BPF_JSGT | BPF_K: 2274 case BPF_JMP | BPF_JSLT | BPF_K: 2275 case BPF_JMP | BPF_JSGE | BPF_K: 2276 case BPF_JMP | 
BPF_JSLE | BPF_K: 2277 case BPF_JMP32 | BPF_JEQ | BPF_K: 2278 case BPF_JMP32 | BPF_JNE | BPF_K: 2279 case BPF_JMP32 | BPF_JGT | BPF_K: 2280 case BPF_JMP32 | BPF_JLT | BPF_K: 2281 case BPF_JMP32 | BPF_JGE | BPF_K: 2282 case BPF_JMP32 | BPF_JLE | BPF_K: 2283 case BPF_JMP32 | BPF_JSGT | BPF_K: 2284 case BPF_JMP32 | BPF_JSLT | BPF_K: 2285 case BPF_JMP32 | BPF_JSGE | BPF_K: 2286 case BPF_JMP32 | BPF_JSLE | BPF_K: 2287 /* test dst_reg, dst_reg to save one extra byte */ 2288 if (imm32 == 0) { 2289 maybe_emit_mod(&prog, dst_reg, dst_reg, 2290 BPF_CLASS(insn->code) == BPF_JMP); 2291 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 2292 goto emit_cond_jmp; 2293 } 2294 2295 /* cmp dst_reg, imm8/32 */ 2296 maybe_emit_1mod(&prog, dst_reg, 2297 BPF_CLASS(insn->code) == BPF_JMP); 2298 2299 if (is_imm8(imm32)) 2300 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 2301 else 2302 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 2303 2304 emit_cond_jmp: /* Convert BPF opcode to x86 */ 2305 switch (BPF_OP(insn->code)) { 2306 case BPF_JEQ: 2307 jmp_cond = X86_JE; 2308 break; 2309 case BPF_JSET: 2310 case BPF_JNE: 2311 jmp_cond = X86_JNE; 2312 break; 2313 case BPF_JGT: 2314 /* GT is unsigned '>', JA in x86 */ 2315 jmp_cond = X86_JA; 2316 break; 2317 case BPF_JLT: 2318 /* LT is unsigned '<', JB in x86 */ 2319 jmp_cond = X86_JB; 2320 break; 2321 case BPF_JGE: 2322 /* GE is unsigned '>=', JAE in x86 */ 2323 jmp_cond = X86_JAE; 2324 break; 2325 case BPF_JLE: 2326 /* LE is unsigned '<=', JBE in x86 */ 2327 jmp_cond = X86_JBE; 2328 break; 2329 case BPF_JSGT: 2330 /* Signed '>', GT in x86 */ 2331 jmp_cond = X86_JG; 2332 break; 2333 case BPF_JSLT: 2334 /* Signed '<', LT in x86 */ 2335 jmp_cond = X86_JL; 2336 break; 2337 case BPF_JSGE: 2338 /* Signed '>=', GE in x86 */ 2339 jmp_cond = X86_JGE; 2340 break; 2341 case BPF_JSLE: 2342 /* Signed '<=', LE in x86 */ 2343 jmp_cond = X86_JLE; 2344 break; 2345 default: /* to silence GCC warning */ 2346 return -EFAULT; 2347 } 2348 jmp_offset = addrs[i + insn->off] - addrs[i]; 2349 if (is_imm8_jmp_offset(jmp_offset)) { 2350 if (jmp_padding) { 2351 /* To keep the jmp_offset valid, the extra bytes are 2352 * padded before the jump insn, so we subtract the 2353 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. 2354 * 2355 * If the previous pass already emits an imm8 2356 * jmp_cond, then this BPF insn won't shrink, so 2357 * "nops" is 0. 2358 * 2359 * On the other hand, if the previous pass emits an 2360 * imm32 jmp_cond, the extra 4 bytes(*) is padded to 2361 * keep the image from shrinking further. 2362 * 2363 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond 2364 * is 2 bytes, so the size difference is 4 bytes. 2365 */ 2366 nops = INSN_SZ_DIFF - 2; 2367 if (nops != 0 && nops != 4) { 2368 pr_err("unexpected jmp_cond padding: %d bytes\n", 2369 nops); 2370 return -EFAULT; 2371 } 2372 emit_nops(&prog, nops); 2373 } 2374 EMIT2(jmp_cond, jmp_offset); 2375 } else if (is_simm32(jmp_offset)) { 2376 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 2377 } else { 2378 pr_err("cond_jmp gen bug %llx\n", jmp_offset); 2379 return -EFAULT; 2380 } 2381 2382 break; 2383 2384 case BPF_JMP | BPF_JA: 2385 case BPF_JMP32 | BPF_JA: 2386 if (BPF_CLASS(insn->code) == BPF_JMP) { 2387 if (insn->off == -1) 2388 /* -1 jmp instructions will always jump 2389 * backwards two bytes. Explicitly handling 2390 * this case avoids wasting too many passes 2391 * when there are long sequences of replaced 2392 * dead code. 
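* Being dead code, such an insn is never executed; the fixed-size
* 2-byte self-jump (EB FE) it becomes keeps addrs[] stable from
* pass to pass.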
2393 */ 2394 jmp_offset = -2; 2395 else 2396 jmp_offset = addrs[i + insn->off] - addrs[i]; 2397 } else { 2398 if (insn->imm == -1) 2399 jmp_offset = -2; 2400 else 2401 jmp_offset = addrs[i + insn->imm] - addrs[i]; 2402 } 2403 2404 if (!jmp_offset) { 2405 /* 2406 * If jmp_padding is enabled, the extra nops will 2407 * be inserted. Otherwise, optimize out nop jumps. 2408 */ 2409 if (jmp_padding) { 2410 /* There are 3 possible conditions. 2411 * (1) This BPF_JA is already optimized out in 2412 * the previous run, so there is no need 2413 * to pad any extra byte (0 byte). 2414 * (2) The previous pass emits an imm8 jmp, 2415 * so we pad 2 bytes to match the previous 2416 * insn size. 2417 * (3) Similarly, the previous pass emits an 2418 * imm32 jmp, and 5 bytes is padded. 2419 */ 2420 nops = INSN_SZ_DIFF; 2421 if (nops != 0 && nops != 2 && nops != 5) { 2422 pr_err("unexpected nop jump padding: %d bytes\n", 2423 nops); 2424 return -EFAULT; 2425 } 2426 emit_nops(&prog, nops); 2427 } 2428 break; 2429 } 2430 emit_jmp: 2431 if (is_imm8_jmp_offset(jmp_offset)) { 2432 if (jmp_padding) { 2433 /* To avoid breaking jmp_offset, the extra bytes 2434 * are padded before the actual jmp insn, so 2435 * 2 bytes is subtracted from INSN_SZ_DIFF. 2436 * 2437 * If the previous pass already emits an imm8 2438 * jmp, there is nothing to pad (0 byte). 2439 * 2440 * If it emits an imm32 jmp (5 bytes) previously 2441 * and now an imm8 jmp (2 bytes), then we pad 2442 * (5 - 2 = 3) bytes to stop the image from 2443 * shrinking further. 2444 */ 2445 nops = INSN_SZ_DIFF - 2; 2446 if (nops != 0 && nops != 3) { 2447 pr_err("unexpected jump padding: %d bytes\n", 2448 nops); 2449 return -EFAULT; 2450 } 2451 emit_nops(&prog, INSN_SZ_DIFF - 2); 2452 } 2453 EMIT2(0xEB, jmp_offset); 2454 } else if (is_simm32(jmp_offset)) { 2455 EMIT1_off32(0xE9, jmp_offset); 2456 } else { 2457 pr_err("jmp gen bug %llx\n", jmp_offset); 2458 return -EFAULT; 2459 } 2460 break; 2461 2462 case BPF_JMP | BPF_EXIT: 2463 if (seen_exit) { 2464 jmp_offset = ctx->cleanup_addr - addrs[i]; 2465 goto emit_jmp; 2466 } 2467 seen_exit = true; 2468 /* Update cleanup_addr */ 2469 ctx->cleanup_addr = proglen; 2470 if (bpf_prog->aux->exception_boundary) { 2471 pop_callee_regs(&prog, all_callee_regs_used); 2472 pop_r12(&prog); 2473 } else { 2474 pop_callee_regs(&prog, callee_regs_used); 2475 if (arena_vm_start) 2476 pop_r12(&prog); 2477 } 2478 EMIT1(0xC9); /* leave */ 2479 emit_return(&prog, image + addrs[i - 1] + (prog - temp)); 2480 break; 2481 2482 default: 2483 /* 2484 * By design x86-64 JIT should support all BPF instructions. 2485 * This error will be seen if new instruction was added 2486 * to the interpreter, but not to the JIT, or if there is 2487 * junk in bpf_prog. 2488 */ 2489 pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 2490 return -EINVAL; 2491 } 2492 2493 ilen = prog - temp; 2494 if (ilen > BPF_MAX_INSN_SIZE) { 2495 pr_err("bpf_jit: fatal insn size error\n"); 2496 return -EFAULT; 2497 } 2498 2499 if (image) { 2500 /* 2501 * When populating the image, assert that: 2502 * 2503 * i) We do not write beyond the allocated space, and 2504 * ii) addrs[i] did not change from the prior run, in order 2505 * to validate assumptions made for computing branch 2506 * displacements. 
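* For example, if a prior pass recorded addrs[i] = 0x120 but this
* insn now JITs to a different length, proglen + ilen != addrs[i]
* and we bail out rather than emit branches computed against stale
* offsets.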
2507 */
2508 if (unlikely(proglen + ilen > oldproglen ||
2509 proglen + ilen != addrs[i])) {
2510 pr_err("bpf_jit: fatal error\n");
2511 return -EFAULT;
2512 }
2513 memcpy(rw_image + proglen, temp, ilen);
2514 }
2515 proglen += ilen;
2516 addrs[i] = proglen;
2517 prog = temp;
2518 }
2519
2520 if (image && excnt != bpf_prog->aux->num_exentries) {
2521 pr_err("extable is not populated\n");
2522 return -EFAULT;
2523 }
2524 return proglen;
2525 }
2526
2527 static void clean_stack_garbage(const struct btf_func_model *m,
2528 u8 **pprog, int nr_stack_slots,
2529 int stack_size)
2530 {
2531 int arg_size, off;
2532 u8 *prog;
2533
2534 /* Generally speaking, the compiler will pass the arguments
2535 * on the stack with "push" instructions, each of which takes
2536 * 8 bytes on the stack. In that case there are no garbage
2537 * values while we copy the arguments from the origin stack
2538 * frame to the current one in BPF_DW units.
2539 *
2540 * However, sometimes the compiler allocates only 4 bytes on
2541 * the stack for an argument. For now, this can only happen
2542 * when there is a single on-stack argument and its size is
2543 * at most 4 bytes. In that case, there are garbage values
2544 * in the upper 4 bytes of the slot where we store the
2545 * argument in the current stack frame.
2546 *
2547 * arguments on origin stack:
2548 *
2549 * stack_arg_1(4-byte) xxx(4-byte)
2550 *
2551 * what we copy:
2552 *
2553 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2554 *
2555 * and the xxx is the garbage that we should clean here.
2556 */
2557 if (nr_stack_slots != 1)
2558 return;
2559
2560 /* the size of the last argument */
2561 arg_size = m->arg_size[m->nr_args - 1];
2562 if (arg_size <= 4) {
2563 off = -(stack_size - 4);
2564 prog = *pprog;
2565 /* mov DWORD PTR [rbp + off], 0 */
2566 if (!is_imm8(off))
2567 EMIT2_off32(0xC7, 0x85, off);
2568 else
2569 EMIT3(0xC7, 0x45, off);
2570 EMIT(0, 4);
2571 *pprog = prog;
2572 }
2573 }
2574
2575 /* get the number of regs that are used to pass arguments */
2576 static int get_nr_used_regs(const struct btf_func_model *m)
2577 {
2578 int i, arg_regs, nr_used_regs = 0;
2579
2580 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2581 arg_regs = (m->arg_size[i] + 7) / 8;
2582 if (nr_used_regs + arg_regs <= 6)
2583 nr_used_regs += arg_regs;
2584
2585 if (nr_used_regs >= 6)
2586 break;
2587 }
2588
2589 return nr_used_regs;
2590 }
2591
2592 static void save_args(const struct btf_func_model *m, u8 **prog,
2593 int stack_size, bool for_call_origin)
2594 {
2595 int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2596 int i, j;
2597
2598 /* Store function arguments to stack.
2599 * For a function that accepts two pointers the sequence will be:
2600 * mov QWORD PTR [rbp-0x10],rdi
2601 * mov QWORD PTR [rbp-0x8],rsi
2602 */
2603 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2604 arg_regs = (m->arg_size[i] + 7) / 8;
2605
2606 /* According to the research of Yonghong, the members of a
2607 * struct argument are either all in registers or all on
2608 * the stack. Meanwhile, the compiler passes an argument in
2609 * regs only if the remaining regs can hold all of it.
2610 *
2611 * The args can therefore end up out of order. For example:
2612 *
2613 * struct foo_struct {
2614 * long a;
2615 * int b;
2616 * };
2617 * int foo(char, char, char, char, char, struct foo_struct,
2618 * char);
2619 *
2620 * arg1-5 and arg7 will be passed in regs, and arg6 will be
2621 * passed on the stack.
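* (With the x86-64 SysV convention: the five chars take rdi, rsi,
* rdx, rcx and r8; the 12-byte struct needs two register slots but
* only r9 is left, so it goes to the stack, and the trailing char
* then takes r9.)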
2622 */ 2623 if (nr_regs + arg_regs > 6) { 2624 /* copy function arguments from origin stack frame 2625 * into current stack frame. 2626 * 2627 * The starting address of the arguments on-stack 2628 * is: 2629 * rbp + 8(push rbp) + 2630 * 8(return addr of origin call) + 2631 * 8(return addr of the caller) 2632 * which means: rbp + 24 2633 */ 2634 for (j = 0; j < arg_regs; j++) { 2635 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 2636 nr_stack_slots * 8 + 0x18); 2637 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0, 2638 -stack_size); 2639 2640 if (!nr_stack_slots) 2641 first_off = stack_size; 2642 stack_size -= 8; 2643 nr_stack_slots++; 2644 } 2645 } else { 2646 /* Only copy the arguments on-stack to current 2647 * 'stack_size' and ignore the regs, used to 2648 * prepare the arguments on-stack for origin call. 2649 */ 2650 if (for_call_origin) { 2651 nr_regs += arg_regs; 2652 continue; 2653 } 2654 2655 /* copy the arguments from regs into stack */ 2656 for (j = 0; j < arg_regs; j++) { 2657 emit_stx(prog, BPF_DW, BPF_REG_FP, 2658 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, 2659 -stack_size); 2660 stack_size -= 8; 2661 nr_regs++; 2662 } 2663 } 2664 } 2665 2666 clean_stack_garbage(m, prog, nr_stack_slots, first_off); 2667 } 2668 2669 static void restore_regs(const struct btf_func_model *m, u8 **prog, 2670 int stack_size) 2671 { 2672 int i, j, arg_regs, nr_regs = 0; 2673 2674 /* Restore function arguments from stack. 2675 * For a function that accepts two pointers the sequence will be: 2676 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] 2677 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] 2678 * 2679 * The logic here is similar to what we do in save_args() 2680 */ 2681 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { 2682 arg_regs = (m->arg_size[i] + 7) / 8; 2683 if (nr_regs + arg_regs <= 6) { 2684 for (j = 0; j < arg_regs; j++) { 2685 emit_ldx(prog, BPF_DW, 2686 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, 2687 BPF_REG_FP, 2688 -stack_size); 2689 stack_size -= 8; 2690 nr_regs++; 2691 } 2692 } else { 2693 stack_size -= 8 * arg_regs; 2694 } 2695 2696 if (nr_regs >= 6) 2697 break; 2698 } 2699 } 2700 2701 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, 2702 struct bpf_tramp_link *l, int stack_size, 2703 int run_ctx_off, bool save_ret, 2704 void *image, void *rw_image) 2705 { 2706 u8 *prog = *pprog; 2707 u8 *jmp_insn; 2708 int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); 2709 struct bpf_prog *p = l->link.prog; 2710 u64 cookie = l->cookie; 2711 2712 /* mov rdi, cookie */ 2713 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie); 2714 2715 /* Prepare struct bpf_tramp_run_ctx. 2716 * 2717 * bpf_tramp_run_ctx is already preserved by 2718 * arch_prepare_bpf_trampoline(). 
2719 *
2720 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2721 */
2722 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2723
2724 /* arg1: mov rdi, progs[i] */
2725 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2726 /* arg2: lea rsi, [rbp - run_ctx_off] */
2727 if (!is_imm8(-run_ctx_off))
2728 EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2729 else
2730 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
2731
2732 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
2733 return -EINVAL;
2734 /* remember prog start time returned by __bpf_prog_enter */
2735 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2736
2737 /* if (__bpf_prog_enter*(prog) == 0)
2738 * goto skip_exec_of_prog;
2739 */
2740 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
2741 /* emit 2 nops that will be replaced with JE insn */
2742 jmp_insn = prog;
2743 emit_nops(&prog, 2);
2744
2745 /* arg1: lea rdi, [rbp - stack_size] */
2746 if (!is_imm8(-stack_size))
2747 EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
2748 else
2749 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
2750 /* arg2: progs[i]->insnsi for interpreter */
2751 if (!p->jited)
2752 emit_mov_imm64(&prog, BPF_REG_2,
2753 (long) p->insnsi >> 32,
2754 (u32) (long) p->insnsi);
2755 /* call JITed bpf program or interpreter */
2756 if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
2757 return -EINVAL;
2758
2759 /*
2760 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
2761 * value of the previous call, which is then passed on the stack
2762 * to the next BPF program.
2763 *
2764 * BPF_TRAMP_FENTRY trampoline may need to return the return
2765 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2766 */
2767 if (save_ret)
2768 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2769
2770 /* replace 2 nops with JE insn, since jmp target is known */
2771 jmp_insn[0] = X86_JE;
2772 jmp_insn[1] = prog - jmp_insn - 2;
2773
2774 /* arg1: mov rdi, progs[i] */
2775 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2776 /* arg2: mov rsi, rbx <- start time in nsec */
2777 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2778 /* arg3: lea rdx, [rbp - run_ctx_off] */
2779 if (!is_imm8(-run_ctx_off))
2780 EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
2781 else
2782 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
2783 if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
2784 return -EINVAL;
2785
2786 *pprog = prog;
2787 return 0;
2788 }
2789
2790 static void emit_align(u8 **pprog, u32 align)
2791 {
2792 u8 *target, *prog = *pprog;
2793
2794 target = PTR_ALIGN(prog, align);
2795 if (target != prog)
2796 emit_nops(&prog, target - prog);
2797
2798 *pprog = prog;
2799 }
2800
2801 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
2802 {
2803 u8 *prog = *pprog;
2804 s64 offset;
2805
2806 offset = func - (ip + 2 + 4);
2807 if (!is_simm32(offset)) {
2808 pr_err("Target %p is out of range\n", func);
2809 return -EINVAL;
2810 }
2811 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
2812 *pprog = prog;
2813 return 0;
2814 }
2815
2816 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
2817 struct bpf_tramp_links *tl, int stack_size,
2818 int run_ctx_off, bool save_ret,
2819 void *image, void *rw_image)
2820 {
2821 int i;
2822 u8 *prog = *pprog;
2823
2824 for (i = 0; i < tl->nr_links; i++) {
2825 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2826 run_ctx_off, save_ret, image, rw_image))
2827 return -EINVAL;
2828 }
2829 *pprog = prog;
2830 return 0;
2831 }
2832
2833 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2834 struct bpf_tramp_links *tl, int stack_size,
2835 int run_ctx_off, u8 **branches,
2836 void *image, void *rw_image)
2837 {
2838 u8 *prog = *pprog;
2839 int i;
2840
2841 /* The first fmod_ret program will receive a garbage return value.
2842 * Set this to 0 to avoid confusing the program.
2843 */
2844 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2845 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2846 for (i = 0; i < tl->nr_links; i++) {
2847 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
2848 image, rw_image))
2849 return -EINVAL;
2850
2851 /* mod_ret prog stored return value into [rbp - 8]. Emit:
2852 * if (*(u64 *)(rbp - 8) != 0)
2853 * goto do_fexit;
2854 */
2855 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
2856 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2857
2858 /* Save the location of the branch and generate 6 nops
2859 * (4 bytes for an offset and 2 bytes for the jump). These nops
2860 * are replaced with a conditional jump once do_fexit (i.e. the
2861 * start of the fexit invocation) is finalized.
2862 */
2863 branches[i] = prog;
2864 emit_nops(&prog, 4 + 2);
2865 }
2866
2867 *pprog = prog;
2868 return 0;
2869 }
2870
2871 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
2872 #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \
2873 __LOAD_TCC_PTR(-round_up(stack, 8) - 8)
2874
2875 /* Example:
2876 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2877 * its 'struct btf_func_model' will have nr_args=2
2878 * The assembly code when eth_type_trans is executing after trampoline:
2879 *
2880 * push rbp
2881 * mov rbp, rsp
2882 * sub rsp, 16 // space for skb and dev
2883 * push rbx // temp regs to pass start time
2884 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
2885 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
2886 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2887 * mov rbx, rax // remember start time if bpf stats are enabled
2888 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
2889 * call addr_of_jited_FENTRY_prog
2890 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2891 * mov rsi, rbx // prog start time
2892 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2893 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
2894 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
2895 * pop rbx
2896 * leave
2897 * ret
2898 *
2899 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
2900 * replaced with 'call generated_bpf_trampoline'. When it returns
2901 * eth_type_trans will continue executing with original skb and dev pointers.
2902 *
2903 * The assembly code when eth_type_trans is called from trampoline:
2904 *
2905 * push rbp
2906 * mov rbp, rsp
2907 * sub rsp, 24 // space for skb, dev, return value
2908 * push rbx // temp regs to pass start time
2909 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
2910 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
2911 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2912 * mov rbx, rax // remember start time if bpf stats are enabled
2913 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2914 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
2915 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2916 * mov rsi, rbx // prog start time
2917 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2918 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
2919 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
2920 * call eth_type_trans+5 // execute body of eth_type_trans
2921 * mov qword ptr [rbp - 8], rax // save return value
2922 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2923 * mov rbx, rax // remember start time if bpf stats are enabled
2924 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2925 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
2926 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2927 * mov rsi, rbx // prog start time
2928 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2929 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
2930 * pop rbx
2931 * leave
2932 * add rsp, 8 // skip eth_type_trans's frame
2933 * ret // return to its caller
2934 */
2935 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
2936 void *rw_image_end, void *image,
2937 const struct btf_func_model *m, u32 flags,
2938 struct bpf_tramp_links *tlinks,
2939 void *func_addr)
2940 {
2941 int i, ret, nr_regs = m->nr_args, stack_size = 0;
2942 int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
2943 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2944 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2945 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2946 void *orig_call = func_addr;
2947 u8 **branches = NULL;
2948 u8 *prog;
2949 bool save_ret;
2950
2951 /*
2952 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
2953 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG,
2954 * which all assume @func_addr is a real, patched kernel function.
2955 */
2956 WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
2957 (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
2958
2959 /* extra registers for struct arguments */
2960 for (i = 0; i < m->nr_args; i++) {
2961 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2962 nr_regs += (m->arg_size[i] + 7) / 8 - 1;
2963 }
2964
2965 /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. The first
2966 * six are passed in regs, the rest are passed on the stack.
2967 */
2968 if (nr_regs > MAX_BPF_FUNC_ARGS)
2969 return -ENOTSUPP;
2970
2971 /* Generated trampoline stack layout:
2972 *
2973 * RBP + 8 [ return address ]
2974 * RBP + 0 [ RBP ]
2975 *
2976 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
2977 * BPF_TRAMP_F_RET_FENTRY_RET flags
2978 *
2979 * [ reg_argN ] always
2980 * [ ... ]
2981 * RBP - regs_off [ reg_arg1 ] program's ctx pointer
2982 *
2983 * RBP - nregs_off [ regs count ] always
2984 *
2985 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
2986 *
2987 * RBP - rbx_off [ rbx value ] always
2988 *
2989 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2990 *
2991 * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG
2992 * [ ... ]
2993 * [ stack_arg2 ]
2994 * RBP - arg_stack_off [ stack_arg1 ]
2995 * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
2996 */
2997
2998 /* room for return value of orig_call or fentry prog */
2999 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
3000 if (save_ret)
3001 stack_size += 8;
3002
3003 stack_size += nr_regs * 8;
3004 regs_off = stack_size;
3005
3006 /* regs count */
3007 stack_size += 8;
3008 nregs_off = stack_size;
3009
3010 if (flags & BPF_TRAMP_F_IP_ARG)
3011 stack_size += 8; /* room for IP address argument */
3012
3013 ip_off = stack_size;
3014
3015 stack_size += 8;
3016 rbx_off = stack_size;
3017
3018 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
3019 run_ctx_off = stack_size;
3020
3021 if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
3022 /* the space used to pass arguments on the stack */
3023 stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
3024 /* Make sure the stack pointer is 16-byte aligned if we
3025 * need to pass arguments on the stack, which means
3026 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
3027 * should be 16-byte aligned. The following code depends
3028 * on stack_size being already 8-byte aligned.
3029 */
3030 stack_size += (stack_size % 16) ? 0 : 8;
3031 }
3032
3033 arg_stack_off = stack_size;
3034
3035 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3036 /* skip patched call instruction and point orig_call to actual
3037 * body of the kernel function.
3038 */
3039 if (is_endbr(*(u32 *)orig_call))
3040 orig_call += ENDBR_INSN_SIZE;
3041 orig_call += X86_PATCH_SIZE;
3042 }
3043
3044 prog = rw_image;
3045
3046 if (flags & BPF_TRAMP_F_INDIRECT) {
3047 /*
3048 * Indirect call for bpf_struct_ops
3049 */
3050 emit_cfi(&prog, cfi_get_func_hash(func_addr));
3051 } else {
3052 /*
3053 * Direct-call fentry stub, as such it needs accounting for the
3054 * __fentry__ call.
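* (i.e. the same call-depth accounting that a regular __fentry__
* call site gets when call depth tracking is enabled; see
* x86_call_depth_emit_accounting().)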
3055 */ 3056 x86_call_depth_emit_accounting(&prog, NULL, image); 3057 } 3058 EMIT1(0x55); /* push rbp */ 3059 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 3060 if (!is_imm8(stack_size)) { 3061 /* sub rsp, stack_size */ 3062 EMIT3_off32(0x48, 0x81, 0xEC, stack_size); 3063 } else { 3064 /* sub rsp, stack_size */ 3065 EMIT4(0x48, 0x83, 0xEC, stack_size); 3066 } 3067 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) 3068 EMIT1(0x50); /* push rax */ 3069 /* mov QWORD PTR [rbp - rbx_off], rbx */ 3070 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off); 3071 3072 /* Store number of argument registers of the traced function: 3073 * mov rax, nr_regs 3074 * mov QWORD PTR [rbp - nregs_off], rax 3075 */ 3076 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs); 3077 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off); 3078 3079 if (flags & BPF_TRAMP_F_IP_ARG) { 3080 /* Store IP address of the traced function: 3081 * movabsq rax, func_addr 3082 * mov QWORD PTR [rbp - ip_off], rax 3083 */ 3084 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr); 3085 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); 3086 } 3087 3088 save_args(m, &prog, regs_off, false); 3089 3090 if (flags & BPF_TRAMP_F_CALL_ORIG) { 3091 /* arg1: mov rdi, im */ 3092 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 3093 if (emit_rsb_call(&prog, __bpf_tramp_enter, 3094 image + (prog - (u8 *)rw_image))) { 3095 ret = -EINVAL; 3096 goto cleanup; 3097 } 3098 } 3099 3100 if (fentry->nr_links) { 3101 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, 3102 flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image)) 3103 return -EINVAL; 3104 } 3105 3106 if (fmod_ret->nr_links) { 3107 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), 3108 GFP_KERNEL); 3109 if (!branches) 3110 return -ENOMEM; 3111 3112 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off, 3113 run_ctx_off, branches, image, rw_image)) { 3114 ret = -EINVAL; 3115 goto cleanup; 3116 } 3117 } 3118 3119 if (flags & BPF_TRAMP_F_CALL_ORIG) { 3120 restore_regs(m, &prog, regs_off); 3121 save_args(m, &prog, arg_stack_off, true); 3122 3123 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { 3124 /* Before calling the original function, load the 3125 * tail_call_cnt_ptr from stack to rax. 3126 */ 3127 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); 3128 } 3129 3130 if (flags & BPF_TRAMP_F_ORIG_STACK) { 3131 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); 3132 EMIT2(0xff, 0xd3); /* call *rbx */ 3133 } else { 3134 /* call original function */ 3135 if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) { 3136 ret = -EINVAL; 3137 goto cleanup; 3138 } 3139 } 3140 /* remember return value in a stack for bpf prog to access */ 3141 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 3142 im->ip_after_call = image + (prog - (u8 *)rw_image); 3143 emit_nops(&prog, X86_PATCH_SIZE); 3144 } 3145 3146 if (fmod_ret->nr_links) { 3147 /* From Intel 64 and IA-32 Architectures Optimization 3148 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 3149 * Coding Rule 11: All branch targets should be 16-byte 3150 * aligned. 3151 */ 3152 emit_align(&prog, 16); 3153 /* Update the branches saved in invoke_bpf_mod_ret with the 3154 * aligned address of do_fexit. 
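* Each saved location still holds the 6 nops emitted by
* invoke_bpf_mod_ret(); emit_cond_near_jump() rewrites them in
* place as 'jne do_fexit' (0F 85 rel32) now that the target is
* known.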
3155 */ 3156 for (i = 0; i < fmod_ret->nr_links; i++) { 3157 emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image), 3158 image + (branches[i] - (u8 *)rw_image), X86_JNE); 3159 } 3160 } 3161 3162 if (fexit->nr_links) { 3163 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, 3164 false, image, rw_image)) { 3165 ret = -EINVAL; 3166 goto cleanup; 3167 } 3168 } 3169 3170 if (flags & BPF_TRAMP_F_RESTORE_REGS) 3171 restore_regs(m, &prog, regs_off); 3172 3173 /* This needs to be done regardless. If there were fmod_ret programs, 3174 * the return value is only updated on the stack and still needs to be 3175 * restored to R0. 3176 */ 3177 if (flags & BPF_TRAMP_F_CALL_ORIG) { 3178 im->ip_epilogue = image + (prog - (u8 *)rw_image); 3179 /* arg1: mov rdi, im */ 3180 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 3181 if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) { 3182 ret = -EINVAL; 3183 goto cleanup; 3184 } 3185 } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { 3186 /* Before running the original function, load the 3187 * tail_call_cnt_ptr from stack to rax. 3188 */ 3189 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); 3190 } 3191 3192 /* restore return value of orig_call or fentry prog back into RAX */ 3193 if (save_ret) 3194 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 3195 3196 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off); 3197 EMIT1(0xC9); /* leave */ 3198 if (flags & BPF_TRAMP_F_SKIP_FRAME) { 3199 /* skip our return address and return to parent */ 3200 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ 3201 } 3202 emit_return(&prog, image + (prog - (u8 *)rw_image)); 3203 /* Make sure the trampoline generation logic doesn't overflow */ 3204 if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) { 3205 ret = -EFAULT; 3206 goto cleanup; 3207 } 3208 ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY; 3209 3210 cleanup: 3211 kfree(branches); 3212 return ret; 3213 } 3214 3215 void *arch_alloc_bpf_trampoline(unsigned int size) 3216 { 3217 return bpf_prog_pack_alloc(size, jit_fill_hole); 3218 } 3219 3220 void arch_free_bpf_trampoline(void *image, unsigned int size) 3221 { 3222 bpf_prog_pack_free(image, size); 3223 } 3224 3225 int arch_protect_bpf_trampoline(void *image, unsigned int size) 3226 { 3227 return 0; 3228 } 3229 3230 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, 3231 const struct btf_func_model *m, u32 flags, 3232 struct bpf_tramp_links *tlinks, 3233 void *func_addr) 3234 { 3235 void *rw_image, *tmp; 3236 int ret; 3237 u32 size = image_end - image; 3238 3239 /* rw_image doesn't need to be in module memory range, so we can 3240 * use kvmalloc. 3241 */ 3242 rw_image = kvmalloc(size, GFP_KERNEL); 3243 if (!rw_image) 3244 return -ENOMEM; 3245 3246 ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m, 3247 flags, tlinks, func_addr); 3248 if (ret < 0) 3249 goto out; 3250 3251 tmp = bpf_arch_text_copy(image, rw_image, size); 3252 if (IS_ERR(tmp)) 3253 ret = PTR_ERR(tmp); 3254 out: 3255 kvfree(rw_image); 3256 return ret; 3257 } 3258 3259 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, 3260 struct bpf_tramp_links *tlinks, void *func_addr) 3261 { 3262 struct bpf_tramp_image im; 3263 void *image; 3264 int ret; 3265 3266 /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline(). 3267 * This will NOT cause fragmentation in direct map, as we do not 3268 * call set_memory_*() on this buffer. 
3269 * 3270 * We cannot use kvmalloc here, because we need image to be in 3271 * module memory range. 3272 */ 3273 image = bpf_jit_alloc_exec(PAGE_SIZE); 3274 if (!image) 3275 return -ENOMEM; 3276 3277 ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image, 3278 m, flags, tlinks, func_addr); 3279 bpf_jit_free_exec(image); 3280 return ret; 3281 } 3282 3283 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf) 3284 { 3285 u8 *jg_reloc, *prog = *pprog; 3286 int pivot, err, jg_bytes = 1; 3287 s64 jg_offset; 3288 3289 if (a == b) { 3290 /* Leaf node of recursion, i.e. not a range of indices 3291 * anymore. 3292 */ 3293 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 3294 if (!is_simm32(progs[a])) 3295 return -1; 3296 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), 3297 progs[a]); 3298 err = emit_cond_near_jump(&prog, /* je func */ 3299 (void *)progs[a], image + (prog - buf), 3300 X86_JE); 3301 if (err) 3302 return err; 3303 3304 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf)); 3305 3306 *pprog = prog; 3307 return 0; 3308 } 3309 3310 /* Not a leaf node, so we pivot, and recursively descend into 3311 * the lower and upper ranges. 3312 */ 3313 pivot = (b - a) / 2; 3314 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 3315 if (!is_simm32(progs[a + pivot])) 3316 return -1; 3317 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); 3318 3319 if (pivot > 2) { /* jg upper_part */ 3320 /* Require near jump. */ 3321 jg_bytes = 4; 3322 EMIT2_off32(0x0F, X86_JG + 0x10, 0); 3323 } else { 3324 EMIT2(X86_JG, 0); 3325 } 3326 jg_reloc = prog; 3327 3328 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ 3329 progs, image, buf); 3330 if (err) 3331 return err; 3332 3333 /* From Intel 64 and IA-32 Architectures Optimization 3334 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 3335 * Coding Rule 11: All branch targets should be 16-byte 3336 * aligned. 
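* The jg emitted above lands here, at the start of the upper half
* of the dispatch tree; the alignment nops themselves are never
* executed, since every leaf of the lower half ends in an
* unconditional indirect jump.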
3337 */
3338 emit_align(&prog, 16);
3339 jg_offset = prog - jg_reloc;
3340 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
3341
3342 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
3343 b, progs, image, buf);
3344 if (err)
3345 return err;
3346
3347 *pprog = prog;
3348 return 0;
3349 }
3350
3351 static int cmp_ips(const void *a, const void *b)
3352 {
3353 const s64 *ipa = a;
3354 const s64 *ipb = b;
3355
3356 if (*ipa > *ipb)
3357 return 1;
3358 if (*ipa < *ipb)
3359 return -1;
3360 return 0;
3361 }
3362
3363 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
3364 {
3365 u8 *prog = buf;
3366
3367 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
3368 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
3369 }
3370
3371 static const char *bpf_get_prog_name(struct bpf_prog *prog)
3372 {
3373 if (prog->aux->ksym.prog)
3374 return prog->aux->ksym.name;
3375 return prog->aux->name;
3376 }
3377
3378 static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
3379 {
3380 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3381 u64 *stack_ptr;
3382
3383 for_each_possible_cpu(cpu) {
3384 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3385 stack_ptr[0] = PRIV_STACK_GUARD_VAL;
3386 stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
3387 }
3388 }
3389
3390 static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
3391 struct bpf_prog *prog)
3392 {
3393 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3394 u64 *stack_ptr;
3395
3396 for_each_possible_cpu(cpu) {
3397 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3398 if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
3399 stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) {
3400 pr_err("BPF private stack overflow/underflow detected for prog %s\n",
3401 bpf_get_prog_name(prog));
3402 break;
3403 }
3404 }
3405 }
3406
3407 struct x64_jit_data {
3408 struct bpf_binary_header *rw_header;
3409 struct bpf_binary_header *header;
3410 int *addrs;
3411 u8 *image;
3412 int proglen;
3413 struct jit_context ctx;
3414 };
3415
3416 #define MAX_PASSES 20
3417 #define PADDING_PASSES (MAX_PASSES - 5)
3418
3419 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
3420 {
3421 struct bpf_binary_header *rw_header = NULL;
3422 struct bpf_binary_header *header = NULL;
3423 struct bpf_prog *tmp, *orig_prog = prog;
3424 void __percpu *priv_stack_ptr = NULL;
3425 struct x64_jit_data *jit_data;
3426 int priv_stack_alloc_sz;
3427 int proglen, oldproglen = 0;
3428 struct jit_context ctx = {};
3429 bool tmp_blinded = false;
3430 bool extra_pass = false;
3431 bool padding = false;
3432 u8 *rw_image = NULL;
3433 u8 *image = NULL;
3434 int *addrs;
3435 int pass;
3436 int i;
3437
3438 if (!prog->jit_requested)
3439 return orig_prog;
3440
3441 tmp = bpf_jit_blind_constants(prog);
3442 /*
3443 * If blinding was requested and we failed during blinding,
3444 * we must fall back to the interpreter.
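* bpf_jit_blind_constants() returns the original prog when blinding
* is not requested, a blinded clone on success, and an ERR_PTR() on
* failure; only the ERR_PTR() case takes this bail-out.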
3445 */ 3446 if (IS_ERR(tmp)) 3447 return orig_prog; 3448 if (tmp != prog) { 3449 tmp_blinded = true; 3450 prog = tmp; 3451 } 3452 3453 jit_data = prog->aux->jit_data; 3454 if (!jit_data) { 3455 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 3456 if (!jit_data) { 3457 prog = orig_prog; 3458 goto out; 3459 } 3460 prog->aux->jit_data = jit_data; 3461 } 3462 priv_stack_ptr = prog->aux->priv_stack_ptr; 3463 if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) { 3464 /* Allocate actual private stack size with verifier-calculated 3465 * stack size plus two memory guards to protect overflow and 3466 * underflow. 3467 */ 3468 priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) + 3469 2 * PRIV_STACK_GUARD_SZ; 3470 priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL); 3471 if (!priv_stack_ptr) { 3472 prog = orig_prog; 3473 goto out_priv_stack; 3474 } 3475 3476 priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz); 3477 prog->aux->priv_stack_ptr = priv_stack_ptr; 3478 } 3479 addrs = jit_data->addrs; 3480 if (addrs) { 3481 ctx = jit_data->ctx; 3482 oldproglen = jit_data->proglen; 3483 image = jit_data->image; 3484 header = jit_data->header; 3485 rw_header = jit_data->rw_header; 3486 rw_image = (void *)rw_header + ((void *)image - (void *)header); 3487 extra_pass = true; 3488 padding = true; 3489 goto skip_init_addrs; 3490 } 3491 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 3492 if (!addrs) { 3493 prog = orig_prog; 3494 goto out_addrs; 3495 } 3496 3497 /* 3498 * Before first pass, make a rough estimation of addrs[] 3499 * each BPF instruction is translated to less than 64 bytes 3500 */ 3501 for (proglen = 0, i = 0; i <= prog->len; i++) { 3502 proglen += 64; 3503 addrs[i] = proglen; 3504 } 3505 ctx.cleanup_addr = proglen; 3506 skip_init_addrs: 3507 3508 /* 3509 * JITed image shrinks with every pass and the loop iterates 3510 * until the image stops shrinking. Very large BPF programs 3511 * may converge on the last pass. In such case do one more 3512 * pass to emit the final image. 3513 */ 3514 for (pass = 0; pass < MAX_PASSES || image; pass++) { 3515 if (!padding && pass >= PADDING_PASSES) 3516 padding = true; 3517 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); 3518 if (proglen <= 0) { 3519 out_image: 3520 image = NULL; 3521 if (header) { 3522 bpf_arch_text_copy(&header->size, &rw_header->size, 3523 sizeof(rw_header->size)); 3524 bpf_jit_binary_pack_free(header, rw_header); 3525 } 3526 /* Fall back to interpreter mode */ 3527 prog = orig_prog; 3528 if (extra_pass) { 3529 prog->bpf_func = NULL; 3530 prog->jited = 0; 3531 prog->jited_len = 0; 3532 } 3533 goto out_addrs; 3534 } 3535 if (image) { 3536 if (proglen != oldproglen) { 3537 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 3538 proglen, oldproglen); 3539 goto out_image; 3540 } 3541 break; 3542 } 3543 if (proglen == oldproglen) { 3544 /* 3545 * The number of entries in extable is the number of BPF_LDX 3546 * insns that access kernel memory via "pointer to BTF type". 3547 * The verifier changed their opcode from LDX|MEM|size 3548 * to LDX|PROBE_MEM|size to make JITing easier. 
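* If the verifier counted e.g. three such loads, num_exentries is 3
* and extable_size below reserves three exception_table_entry slots,
* placed directly behind the rounded-up program image.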
3549 */ 3550 u32 align = __alignof__(struct exception_table_entry); 3551 u32 extable_size = prog->aux->num_exentries * 3552 sizeof(struct exception_table_entry); 3553 3554 /* allocate module memory for x86 insns and extable */ 3555 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, 3556 &image, align, &rw_header, &rw_image, 3557 jit_fill_hole); 3558 if (!header) { 3559 prog = orig_prog; 3560 goto out_addrs; 3561 } 3562 prog->aux->extable = (void *) image + roundup(proglen, align); 3563 } 3564 oldproglen = proglen; 3565 cond_resched(); 3566 } 3567 3568 if (bpf_jit_enable > 1) 3569 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image); 3570 3571 if (image) { 3572 if (!prog->is_func || extra_pass) { 3573 /* 3574 * bpf_jit_binary_pack_finalize fails in two scenarios: 3575 * 1) header is not pointing to proper module memory; 3576 * 2) the arch doesn't support bpf_arch_text_copy(). 3577 * 3578 * Both cases are serious bugs and justify WARN_ON. 3579 */ 3580 if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) { 3581 /* header has been freed */ 3582 header = NULL; 3583 goto out_image; 3584 } 3585 3586 bpf_tail_call_direct_fixup(prog); 3587 } else { 3588 jit_data->addrs = addrs; 3589 jit_data->ctx = ctx; 3590 jit_data->proglen = proglen; 3591 jit_data->image = image; 3592 jit_data->header = header; 3593 jit_data->rw_header = rw_header; 3594 } 3595 /* 3596 * ctx.prog_offset is used when CFI preambles put code *before* 3597 * the function. See emit_cfi(). For FineIBT specifically this code 3598 * can also be executed and bpf_prog_kallsyms_add() will 3599 * generate an additional symbol to cover this, hence also 3600 * decrement proglen. 3601 */ 3602 prog->bpf_func = (void *)image + cfi_get_offset(); 3603 prog->jited = 1; 3604 prog->jited_len = proglen - cfi_get_offset(); 3605 } else { 3606 prog = orig_prog; 3607 } 3608 3609 if (!image || !prog->is_func || extra_pass) { 3610 if (image) 3611 bpf_prog_fill_jited_linfo(prog, addrs + 1); 3612 out_addrs: 3613 kvfree(addrs); 3614 if (!image && priv_stack_ptr) { 3615 free_percpu(priv_stack_ptr); 3616 prog->aux->priv_stack_ptr = NULL; 3617 } 3618 out_priv_stack: 3619 kfree(jit_data); 3620 prog->aux->jit_data = NULL; 3621 } 3622 out: 3623 if (tmp_blinded) 3624 bpf_jit_prog_release_other(prog, prog == orig_prog ? 3625 tmp : orig_prog); 3626 return prog; 3627 } 3628 3629 bool bpf_jit_supports_kfunc_call(void) 3630 { 3631 return true; 3632 } 3633 3634 void *bpf_arch_text_copy(void *dst, void *src, size_t len) 3635 { 3636 if (text_poke_copy(dst, src, len) == NULL) 3637 return ERR_PTR(-EINVAL); 3638 return dst; 3639 } 3640 3641 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */ 3642 bool bpf_jit_supports_subprog_tailcalls(void) 3643 { 3644 return true; 3645 } 3646 3647 bool bpf_jit_supports_percpu_insn(void) 3648 { 3649 return true; 3650 } 3651 3652 void bpf_jit_free(struct bpf_prog *prog) 3653 { 3654 if (prog->jited) { 3655 struct x64_jit_data *jit_data = prog->aux->jit_data; 3656 struct bpf_binary_header *hdr; 3657 void __percpu *priv_stack_ptr; 3658 int priv_stack_alloc_sz; 3659 3660 /* 3661 * If we fail the final pass of JIT (from jit_subprogs), 3662 * the program may not be finalized yet. Call finalize here 3663 * before freeing it. 
3664 */ 3665 if (jit_data) { 3666 bpf_jit_binary_pack_finalize(jit_data->header, 3667 jit_data->rw_header); 3668 kvfree(jit_data->addrs); 3669 kfree(jit_data); 3670 } 3671 prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset(); 3672 hdr = bpf_jit_binary_pack_hdr(prog); 3673 bpf_jit_binary_pack_free(hdr, NULL); 3674 priv_stack_ptr = prog->aux->priv_stack_ptr; 3675 if (priv_stack_ptr) { 3676 priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) + 3677 2 * PRIV_STACK_GUARD_SZ; 3678 priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog); 3679 free_percpu(prog->aux->priv_stack_ptr); 3680 } 3681 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); 3682 } 3683 3684 bpf_prog_unlock_free(prog); 3685 } 3686 3687 bool bpf_jit_supports_exceptions(void) 3688 { 3689 /* We unwind through both kernel frames (starting from within bpf_throw 3690 * call) and BPF frames. Therefore we require ORC unwinder to be enabled 3691 * to walk kernel frames and reach BPF frames in the stack trace. 3692 */ 3693 return IS_ENABLED(CONFIG_UNWINDER_ORC); 3694 } 3695 3696 bool bpf_jit_supports_private_stack(void) 3697 { 3698 return true; 3699 } 3700 3701 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) 3702 { 3703 #if defined(CONFIG_UNWINDER_ORC) 3704 struct unwind_state state; 3705 unsigned long addr; 3706 3707 for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state); 3708 unwind_next_frame(&state)) { 3709 addr = unwind_get_return_address(&state); 3710 if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp)) 3711 break; 3712 } 3713 return; 3714 #endif 3715 WARN(1, "verification of programs using bpf_throw should have failed\n"); 3716 } 3717 3718 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, 3719 struct bpf_prog *new, struct bpf_prog *old) 3720 { 3721 u8 *old_addr, *new_addr, *old_bypass_addr; 3722 int ret; 3723 3724 old_bypass_addr = old ? NULL : poke->bypass_addr; 3725 old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL; 3726 new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL; 3727 3728 /* 3729 * On program loading or teardown, the program's kallsym entry 3730 * might not be in place, so we use __bpf_arch_text_poke to skip 3731 * the kallsyms check. 
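* When the last program is unlinked (new == NULL), the bypass jump
* is re-installed first and synchronize_rcu() below lets in-flight
* executions drain before the tailcall target itself is removed.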
3732 */
3733 if (new) {
3734 ret = __bpf_arch_text_poke(poke->tailcall_target,
3735 BPF_MOD_JUMP,
3736 old_addr, new_addr);
3737 BUG_ON(ret < 0);
3738 if (!old) {
3739 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3740 BPF_MOD_JUMP,
3741 poke->bypass_addr,
3742 NULL);
3743 BUG_ON(ret < 0);
3744 }
3745 } else {
3746 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3747 BPF_MOD_JUMP,
3748 old_bypass_addr,
3749 poke->bypass_addr);
3750 BUG_ON(ret < 0);
3751 /* let other CPUs finish the execution of the program
3752 * so that it will not be possible to expose them
3753 * to an invalid nop, stack unwind or nop state
3754 */
3755 if (!ret)
3756 synchronize_rcu();
3757 ret = __bpf_arch_text_poke(poke->tailcall_target,
3758 BPF_MOD_JUMP,
3759 old_addr, NULL);
3760 BUG_ON(ret < 0);
3761 }
3762 }
3763
3764 bool bpf_jit_supports_arena(void)
3765 {
3766 return true;
3767 }
3768
3769 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3770 {
3771 if (!in_arena)
3772 return true;
3773 switch (insn->code) {
3774 case BPF_STX | BPF_ATOMIC | BPF_W:
3775 case BPF_STX | BPF_ATOMIC | BPF_DW:
3776 if (insn->imm == (BPF_AND | BPF_FETCH) ||
3777 insn->imm == (BPF_OR | BPF_FETCH) ||
3778 insn->imm == (BPF_XOR | BPF_FETCH))
3779 return false;
3780 }
3781 return true;
3782 }
3783
3784 bool bpf_jit_supports_ptr_xchg(void)
3785 {
3786 return true;
3787 }
3788
3789 /* x86-64 JIT emits its own code to filter user addresses so return 0 here */
3790 u64 bpf_arch_uaddress_limit(void)
3791 {
3792 return 0;
3793 }
3794