// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
#else
#define EMIT_ENDBR()
#endif

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F
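
/*
 * Worked example (editor's illustration, not used verbatim by the JIT):
 * with the opcodes above, a short forward jump-if-equal over 5 bytes is
 * emitted as EMIT2(X86_JE, 5), i.e. the bytes 0x74 0x05.  The near
 * (rel32) form adds 0x10 to the opcode plus an 0x0F escape byte, so
 * EMIT2_off32(0x0F, X86_JE + 0x10, rel32) emits 0x0F 0x84 followed by a
 * 32-bit displacement, as done in the emit_cond_jmp path further below.
 */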

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8 */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	       (1 << reg) & (BIT(BPF_REG_1) |
			     BIT(BPF_REG_2) |
			     BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};
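
/*
 * Worked example (editor's illustration): for a 64-bit BPF_ADD | BPF_X
 * with dst_reg = BPF_REG_1 (RDI) and src_reg = BPF_REG_2 (RSI), do_jit()
 * emits the REX.W prefix 0x48 via maybe_emit_mod() and then
 *
 *	EMIT2(simple_alu_opcodes[BPF_ADD], add_2reg(0xC0, dst_reg, src_reg));
 *
 * The ModRM byte is 0xC0 + reg2hex[BPF_REG_1] + (reg2hex[BPF_REG_2] << 3)
 * = 0xF7, so the bytes 0x48 0x01 0xF7 ("add rdi, rsi") land in the image.
 */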

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	EMIT_ENDBR();
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
	OPTIMIZER_HIDE_VAR(func);
	x86_call_depth_emit_accounting(pprog, func);
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
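
/*
 * Editor's illustration with a hypothetical address: if emit_call() is
 * asked to call a target that lies 0x1000 bytes past 'ip', the relative
 * displacement is func - (ip + X86_PATCH_SIZE) = 0xFFB, and emit_patch()
 * writes the 5-byte sequence E8 FB 0F 00 00 ("call rel32").  emit_jump()
 * differs only in the opcode byte (E9, "jmp rel32").
 */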

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
		else
			emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else {
		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
		if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
			EMIT1(0xCC);		/* int3 */
	}

	*pprog = prog;
}

static void emit_return(u8 **pprog, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		emit_jump(&prog, x86_return_thunk, ip);
	} else {
		EMIT1(0xC3);		/* ret */
		if (IS_ENABLED(CONFIG_SLS))
			EMIT1(0xCC);	/* int3 */
	}

	*pprog = prog;
}
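
/*
 * Editor's note on the two helpers above: both route control flow through
 * the kernel's Spectre mitigations when those are enabled and fall back to
 * the plain instruction otherwise.  For example, the tail-call path below
 * passes reg = 1 (rcx) to emit_indirect_jump(); without retpolines this
 * emits the two bytes 0xFF 0xE1 ("jmp *%rcx"), while with retpolines it
 * becomes a direct jump to the per-register thunk.  emit_return() likewise
 * emits either a bare 0xC3 ("ret") or a jump to x86_return_thunk.
 */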

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	pop_callee_regs(&prog, callee_regs_used);

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}
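
/*
 * Editor's note: the helper above handles the generic case where the
 * tail-call target is only known at run time, so it loads the prog
 * pointer from array->ptrs[index] and ends in an indirect jump.  The
 * variant below covers the case where the map and index are constant:
 * it emits the tail-call-count check plus a patchable direct jump,
 * which bpf_tail_call_direct_fixup() later points at the target program
 * via the poke descriptor (see poke->tailcall_target handling there).
 */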

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	pop_callee_regs(&prog, callee_regs_used);
	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
675 */ 676 emit_mov_imm32(&prog, false, dst_reg, imm32_lo); 677 } else { 678 /* movabsq rax, imm64 */ 679 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); 680 EMIT(imm32_lo, 4); 681 EMIT(imm32_hi, 4); 682 } 683 684 *pprog = prog; 685 } 686 687 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) 688 { 689 u8 *prog = *pprog; 690 691 if (is64) { 692 /* mov dst, src */ 693 EMIT_mov(dst_reg, src_reg); 694 } else { 695 /* mov32 dst, src */ 696 if (is_ereg(dst_reg) || is_ereg(src_reg)) 697 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 698 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); 699 } 700 701 *pprog = prog; 702 } 703 704 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg, 705 u32 src_reg) 706 { 707 u8 *prog = *pprog; 708 709 if (is64) { 710 /* movs[b,w,l]q dst, src */ 711 if (num_bits == 8) 712 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe, 713 add_2reg(0xC0, src_reg, dst_reg)); 714 else if (num_bits == 16) 715 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf, 716 add_2reg(0xC0, src_reg, dst_reg)); 717 else if (num_bits == 32) 718 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63, 719 add_2reg(0xC0, src_reg, dst_reg)); 720 } else { 721 /* movs[b,w]l dst, src */ 722 if (num_bits == 8) { 723 EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe, 724 add_2reg(0xC0, src_reg, dst_reg)); 725 } else if (num_bits == 16) { 726 if (is_ereg(dst_reg) || is_ereg(src_reg)) 727 EMIT1(add_2mod(0x40, src_reg, dst_reg)); 728 EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf, 729 add_2reg(0xC0, src_reg, dst_reg)); 730 } 731 } 732 733 *pprog = prog; 734 } 735 736 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */ 737 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off) 738 { 739 u8 *prog = *pprog; 740 741 if (is_imm8(off)) { 742 /* 1-byte signed displacement. 
743 * 744 * If off == 0 we could skip this and save one extra byte, but 745 * special case of x86 R13 which always needs an offset is not 746 * worth the hassle 747 */ 748 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off); 749 } else { 750 /* 4-byte signed displacement */ 751 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off); 752 } 753 *pprog = prog; 754 } 755 756 /* 757 * Emit a REX byte if it will be necessary to address these registers 758 */ 759 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) 760 { 761 u8 *prog = *pprog; 762 763 if (is64) 764 EMIT1(add_2mod(0x48, dst_reg, src_reg)); 765 else if (is_ereg(dst_reg) || is_ereg(src_reg)) 766 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 767 *pprog = prog; 768 } 769 770 /* 771 * Similar version of maybe_emit_mod() for a single register 772 */ 773 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) 774 { 775 u8 *prog = *pprog; 776 777 if (is64) 778 EMIT1(add_1mod(0x48, reg)); 779 else if (is_ereg(reg)) 780 EMIT1(add_1mod(0x40, reg)); 781 *pprog = prog; 782 } 783 784 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 785 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 786 { 787 u8 *prog = *pprog; 788 789 switch (size) { 790 case BPF_B: 791 /* Emit 'movzx rax, byte ptr [rax + off]' */ 792 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); 793 break; 794 case BPF_H: 795 /* Emit 'movzx rax, word ptr [rax + off]' */ 796 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); 797 break; 798 case BPF_W: 799 /* Emit 'mov eax, dword ptr [rax+0x14]' */ 800 if (is_ereg(dst_reg) || is_ereg(src_reg)) 801 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); 802 else 803 EMIT1(0x8B); 804 break; 805 case BPF_DW: 806 /* Emit 'mov rax, qword ptr [rax+0x14]' */ 807 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); 808 break; 809 } 810 emit_insn_suffix(&prog, src_reg, dst_reg, off); 811 *pprog = prog; 812 } 813 814 /* LDSX: dst_reg = *(s8*)(src_reg + off) */ 815 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 816 { 817 u8 *prog = *pprog; 818 819 switch (size) { 820 case BPF_B: 821 /* Emit 'movsx rax, byte ptr [rax + off]' */ 822 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE); 823 break; 824 case BPF_H: 825 /* Emit 'movsx rax, word ptr [rax + off]' */ 826 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF); 827 break; 828 case BPF_W: 829 /* Emit 'movsx rax, dword ptr [rax+0x14]' */ 830 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63); 831 break; 832 } 833 emit_insn_suffix(&prog, src_reg, dst_reg, off); 834 *pprog = prog; 835 } 836 837 /* STX: *(u8*)(dst_reg + off) = src_reg */ 838 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 839 { 840 u8 *prog = *pprog; 841 842 switch (size) { 843 case BPF_B: 844 /* Emit 'mov byte ptr [rax + off], al' */ 845 if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) 846 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ 847 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); 848 else 849 EMIT1(0x88); 850 break; 851 case BPF_H: 852 if (is_ereg(dst_reg) || is_ereg(src_reg)) 853 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); 854 else 855 EMIT2(0x66, 0x89); 856 break; 857 case BPF_W: 858 if (is_ereg(dst_reg) || is_ereg(src_reg)) 859 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); 860 else 861 EMIT1(0x89); 862 break; 863 case BPF_DW: 864 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); 865 break; 866 } 867 emit_insn_suffix(&prog, dst_reg, src_reg, off); 868 *pprog = prog; 869 } 870 871 static int emit_atomic(u8 **pprog, u8 atomic_op, 872 u32 
dst_reg, u32 src_reg, s16 off, u8 bpf_size) 873 { 874 u8 *prog = *pprog; 875 876 EMIT1(0xF0); /* lock prefix */ 877 878 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW); 879 880 /* emit opcode */ 881 switch (atomic_op) { 882 case BPF_ADD: 883 case BPF_AND: 884 case BPF_OR: 885 case BPF_XOR: 886 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */ 887 EMIT1(simple_alu_opcodes[atomic_op]); 888 break; 889 case BPF_ADD | BPF_FETCH: 890 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */ 891 EMIT2(0x0F, 0xC1); 892 break; 893 case BPF_XCHG: 894 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ 895 EMIT1(0x87); 896 break; 897 case BPF_CMPXCHG: 898 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ 899 EMIT2(0x0F, 0xB1); 900 break; 901 default: 902 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); 903 return -EFAULT; 904 } 905 906 emit_insn_suffix(&prog, dst_reg, src_reg, off); 907 908 *pprog = prog; 909 return 0; 910 } 911 912 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) 913 { 914 u32 reg = x->fixup >> 8; 915 916 /* jump over faulting load and clear dest register */ 917 *(unsigned long *)((void *)regs + reg) = 0; 918 regs->ip += x->fixup & 0xff; 919 return true; 920 } 921 922 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, 923 bool *regs_used, bool *tail_call_seen) 924 { 925 int i; 926 927 for (i = 1; i <= insn_cnt; i++, insn++) { 928 if (insn->code == (BPF_JMP | BPF_TAIL_CALL)) 929 *tail_call_seen = true; 930 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) 931 regs_used[0] = true; 932 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) 933 regs_used[1] = true; 934 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) 935 regs_used[2] = true; 936 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) 937 regs_used[3] = true; 938 } 939 } 940 941 static void emit_nops(u8 **pprog, int len) 942 { 943 u8 *prog = *pprog; 944 int i, noplen; 945 946 while (len > 0) { 947 noplen = len; 948 949 if (noplen > ASM_NOP_MAX) 950 noplen = ASM_NOP_MAX; 951 952 for (i = 0; i < noplen; i++) 953 EMIT1(x86_nops[noplen][i]); 954 len -= noplen; 955 } 956 957 *pprog = prog; 958 } 959 960 /* emit the 3-byte VEX prefix 961 * 962 * r: same as rex.r, extra bit for ModRM reg field 963 * x: same as rex.x, extra bit for SIB index field 964 * b: same as rex.b, extra bit for ModRM r/m, or SIB base 965 * m: opcode map select, encoding escape bytes e.g. 
0x0f38 966 * w: same as rex.w (32 bit or 64 bit) or opcode specific 967 * src_reg2: additional source reg (encoded as BPF reg) 968 * l: vector length (128 bit or 256 bit) or reserved 969 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3) 970 */ 971 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m, 972 bool w, u8 src_reg2, bool l, u8 pp) 973 { 974 u8 *prog = *pprog; 975 const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */ 976 u8 b1, b2; 977 u8 vvvv = reg2hex[src_reg2]; 978 979 /* reg2hex gives only the lower 3 bit of vvvv */ 980 if (is_ereg(src_reg2)) 981 vvvv |= 1 << 3; 982 983 /* 984 * 2nd byte of 3-byte VEX prefix 985 * ~ means bit inverted encoding 986 * 987 * 7 0 988 * +---+---+---+---+---+---+---+---+ 989 * |~R |~X |~B | m | 990 * +---+---+---+---+---+---+---+---+ 991 */ 992 b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f); 993 /* 994 * 3rd byte of 3-byte VEX prefix 995 * 996 * 7 0 997 * +---+---+---+---+---+---+---+---+ 998 * | W | ~vvvv | L | pp | 999 * +---+---+---+---+---+---+---+---+ 1000 */ 1001 b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3); 1002 1003 EMIT3(b0, b1, b2); 1004 *pprog = prog; 1005 } 1006 1007 /* emit BMI2 shift instruction */ 1008 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) 1009 { 1010 u8 *prog = *pprog; 1011 bool r = is_ereg(dst_reg); 1012 u8 m = 2; /* escape code 0f38 */ 1013 1014 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op); 1015 EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg)); 1016 *pprog = prog; 1017 } 1018 1019 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) 1020 1021 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, 1022 int oldproglen, struct jit_context *ctx, bool jmp_padding) 1023 { 1024 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; 1025 struct bpf_insn *insn = bpf_prog->insnsi; 1026 bool callee_regs_used[4] = {}; 1027 int insn_cnt = bpf_prog->len; 1028 bool tail_call_seen = false; 1029 bool seen_exit = false; 1030 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; 1031 int i, excnt = 0; 1032 int ilen, proglen = 0; 1033 u8 *prog = temp; 1034 int err; 1035 1036 detect_reg_usage(insn, insn_cnt, callee_regs_used, 1037 &tail_call_seen); 1038 1039 /* tail call's presence in current prog implies it is reachable */ 1040 tail_call_reachable |= tail_call_seen; 1041 1042 emit_prologue(&prog, bpf_prog->aux->stack_depth, 1043 bpf_prog_was_classic(bpf_prog), tail_call_reachable, 1044 bpf_prog->aux->func_idx != 0); 1045 push_callee_regs(&prog, callee_regs_used); 1046 1047 ilen = prog - temp; 1048 if (rw_image) 1049 memcpy(rw_image + proglen, temp, ilen); 1050 proglen += ilen; 1051 addrs[0] = proglen; 1052 prog = temp; 1053 1054 for (i = 1; i <= insn_cnt; i++, insn++) { 1055 const s32 imm32 = insn->imm; 1056 u32 dst_reg = insn->dst_reg; 1057 u32 src_reg = insn->src_reg; 1058 u8 b2 = 0, b3 = 0; 1059 u8 *start_of_ldx; 1060 s64 jmp_offset; 1061 s16 insn_off; 1062 u8 jmp_cond; 1063 u8 *func; 1064 int nops; 1065 1066 switch (insn->code) { 1067 /* ALU */ 1068 case BPF_ALU | BPF_ADD | BPF_X: 1069 case BPF_ALU | BPF_SUB | BPF_X: 1070 case BPF_ALU | BPF_AND | BPF_X: 1071 case BPF_ALU | BPF_OR | BPF_X: 1072 case BPF_ALU | BPF_XOR | BPF_X: 1073 case BPF_ALU64 | BPF_ADD | BPF_X: 1074 case BPF_ALU64 | BPF_SUB | BPF_X: 1075 case BPF_ALU64 | BPF_AND | BPF_X: 1076 case BPF_ALU64 | BPF_OR | BPF_X: 1077 case BPF_ALU64 | BPF_XOR | BPF_X: 1078 maybe_emit_mod(&prog, dst_reg, src_reg, 1079 BPF_CLASS(insn->code) == BPF_ALU64); 1080 b2 = 
simple_alu_opcodes[BPF_OP(insn->code)]; 1081 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); 1082 break; 1083 1084 case BPF_ALU64 | BPF_MOV | BPF_X: 1085 case BPF_ALU | BPF_MOV | BPF_X: 1086 if (insn->off == 0) 1087 emit_mov_reg(&prog, 1088 BPF_CLASS(insn->code) == BPF_ALU64, 1089 dst_reg, src_reg); 1090 else 1091 emit_movsx_reg(&prog, insn->off, 1092 BPF_CLASS(insn->code) == BPF_ALU64, 1093 dst_reg, src_reg); 1094 break; 1095 1096 /* neg dst */ 1097 case BPF_ALU | BPF_NEG: 1098 case BPF_ALU64 | BPF_NEG: 1099 maybe_emit_1mod(&prog, dst_reg, 1100 BPF_CLASS(insn->code) == BPF_ALU64); 1101 EMIT2(0xF7, add_1reg(0xD8, dst_reg)); 1102 break; 1103 1104 case BPF_ALU | BPF_ADD | BPF_K: 1105 case BPF_ALU | BPF_SUB | BPF_K: 1106 case BPF_ALU | BPF_AND | BPF_K: 1107 case BPF_ALU | BPF_OR | BPF_K: 1108 case BPF_ALU | BPF_XOR | BPF_K: 1109 case BPF_ALU64 | BPF_ADD | BPF_K: 1110 case BPF_ALU64 | BPF_SUB | BPF_K: 1111 case BPF_ALU64 | BPF_AND | BPF_K: 1112 case BPF_ALU64 | BPF_OR | BPF_K: 1113 case BPF_ALU64 | BPF_XOR | BPF_K: 1114 maybe_emit_1mod(&prog, dst_reg, 1115 BPF_CLASS(insn->code) == BPF_ALU64); 1116 1117 /* 1118 * b3 holds 'normal' opcode, b2 short form only valid 1119 * in case dst is eax/rax. 1120 */ 1121 switch (BPF_OP(insn->code)) { 1122 case BPF_ADD: 1123 b3 = 0xC0; 1124 b2 = 0x05; 1125 break; 1126 case BPF_SUB: 1127 b3 = 0xE8; 1128 b2 = 0x2D; 1129 break; 1130 case BPF_AND: 1131 b3 = 0xE0; 1132 b2 = 0x25; 1133 break; 1134 case BPF_OR: 1135 b3 = 0xC8; 1136 b2 = 0x0D; 1137 break; 1138 case BPF_XOR: 1139 b3 = 0xF0; 1140 b2 = 0x35; 1141 break; 1142 } 1143 1144 if (is_imm8(imm32)) 1145 EMIT3(0x83, add_1reg(b3, dst_reg), imm32); 1146 else if (is_axreg(dst_reg)) 1147 EMIT1_off32(b2, imm32); 1148 else 1149 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); 1150 break; 1151 1152 case BPF_ALU64 | BPF_MOV | BPF_K: 1153 case BPF_ALU | BPF_MOV | BPF_K: 1154 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, 1155 dst_reg, imm32); 1156 break; 1157 1158 case BPF_LD | BPF_IMM | BPF_DW: 1159 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); 1160 insn++; 1161 i++; 1162 break; 1163 1164 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ 1165 case BPF_ALU | BPF_MOD | BPF_X: 1166 case BPF_ALU | BPF_DIV | BPF_X: 1167 case BPF_ALU | BPF_MOD | BPF_K: 1168 case BPF_ALU | BPF_DIV | BPF_K: 1169 case BPF_ALU64 | BPF_MOD | BPF_X: 1170 case BPF_ALU64 | BPF_DIV | BPF_X: 1171 case BPF_ALU64 | BPF_MOD | BPF_K: 1172 case BPF_ALU64 | BPF_DIV | BPF_K: { 1173 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 1174 1175 if (dst_reg != BPF_REG_0) 1176 EMIT1(0x50); /* push rax */ 1177 if (dst_reg != BPF_REG_3) 1178 EMIT1(0x52); /* push rdx */ 1179 1180 if (BPF_SRC(insn->code) == BPF_X) { 1181 if (src_reg == BPF_REG_0 || 1182 src_reg == BPF_REG_3) { 1183 /* mov r11, src_reg */ 1184 EMIT_mov(AUX_REG, src_reg); 1185 src_reg = AUX_REG; 1186 } 1187 } else { 1188 /* mov r11, imm32 */ 1189 EMIT3_off32(0x49, 0xC7, 0xC3, imm32); 1190 src_reg = AUX_REG; 1191 } 1192 1193 if (dst_reg != BPF_REG_0) 1194 /* mov rax, dst_reg */ 1195 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); 1196 1197 if (insn->off == 0) { 1198 /* 1199 * xor edx, edx 1200 * equivalent to 'xor rdx, rdx', but one byte less 1201 */ 1202 EMIT2(0x31, 0xd2); 1203 1204 /* div src_reg */ 1205 maybe_emit_1mod(&prog, src_reg, is64); 1206 EMIT2(0xF7, add_1reg(0xF0, src_reg)); 1207 } else { 1208 if (BPF_CLASS(insn->code) == BPF_ALU) 1209 EMIT1(0x99); /* cdq */ 1210 else 1211 EMIT2(0x48, 0x99); /* cqo */ 1212 1213 /* idiv src_reg */ 1214 maybe_emit_1mod(&prog, src_reg, 
is64); 1215 EMIT2(0xF7, add_1reg(0xF8, src_reg)); 1216 } 1217 1218 if (BPF_OP(insn->code) == BPF_MOD && 1219 dst_reg != BPF_REG_3) 1220 /* mov dst_reg, rdx */ 1221 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); 1222 else if (BPF_OP(insn->code) == BPF_DIV && 1223 dst_reg != BPF_REG_0) 1224 /* mov dst_reg, rax */ 1225 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); 1226 1227 if (dst_reg != BPF_REG_3) 1228 EMIT1(0x5A); /* pop rdx */ 1229 if (dst_reg != BPF_REG_0) 1230 EMIT1(0x58); /* pop rax */ 1231 break; 1232 } 1233 1234 case BPF_ALU | BPF_MUL | BPF_K: 1235 case BPF_ALU64 | BPF_MUL | BPF_K: 1236 maybe_emit_mod(&prog, dst_reg, dst_reg, 1237 BPF_CLASS(insn->code) == BPF_ALU64); 1238 1239 if (is_imm8(imm32)) 1240 /* imul dst_reg, dst_reg, imm8 */ 1241 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), 1242 imm32); 1243 else 1244 /* imul dst_reg, dst_reg, imm32 */ 1245 EMIT2_off32(0x69, 1246 add_2reg(0xC0, dst_reg, dst_reg), 1247 imm32); 1248 break; 1249 1250 case BPF_ALU | BPF_MUL | BPF_X: 1251 case BPF_ALU64 | BPF_MUL | BPF_X: 1252 maybe_emit_mod(&prog, src_reg, dst_reg, 1253 BPF_CLASS(insn->code) == BPF_ALU64); 1254 1255 /* imul dst_reg, src_reg */ 1256 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); 1257 break; 1258 1259 /* Shifts */ 1260 case BPF_ALU | BPF_LSH | BPF_K: 1261 case BPF_ALU | BPF_RSH | BPF_K: 1262 case BPF_ALU | BPF_ARSH | BPF_K: 1263 case BPF_ALU64 | BPF_LSH | BPF_K: 1264 case BPF_ALU64 | BPF_RSH | BPF_K: 1265 case BPF_ALU64 | BPF_ARSH | BPF_K: 1266 maybe_emit_1mod(&prog, dst_reg, 1267 BPF_CLASS(insn->code) == BPF_ALU64); 1268 1269 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1270 if (imm32 == 1) 1271 EMIT2(0xD1, add_1reg(b3, dst_reg)); 1272 else 1273 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); 1274 break; 1275 1276 case BPF_ALU | BPF_LSH | BPF_X: 1277 case BPF_ALU | BPF_RSH | BPF_X: 1278 case BPF_ALU | BPF_ARSH | BPF_X: 1279 case BPF_ALU64 | BPF_LSH | BPF_X: 1280 case BPF_ALU64 | BPF_RSH | BPF_X: 1281 case BPF_ALU64 | BPF_ARSH | BPF_X: 1282 /* BMI2 shifts aren't better when shift count is already in rcx */ 1283 if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) { 1284 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */ 1285 bool w = (BPF_CLASS(insn->code) == BPF_ALU64); 1286 u8 op; 1287 1288 switch (BPF_OP(insn->code)) { 1289 case BPF_LSH: 1290 op = 1; /* prefix 0x66 */ 1291 break; 1292 case BPF_RSH: 1293 op = 3; /* prefix 0xf2 */ 1294 break; 1295 case BPF_ARSH: 1296 op = 2; /* prefix 0xf3 */ 1297 break; 1298 } 1299 1300 emit_shiftx(&prog, dst_reg, src_reg, w, op); 1301 1302 break; 1303 } 1304 1305 if (src_reg != BPF_REG_4) { /* common case */ 1306 /* Check for bad case when dst_reg == rcx */ 1307 if (dst_reg == BPF_REG_4) { 1308 /* mov r11, dst_reg */ 1309 EMIT_mov(AUX_REG, dst_reg); 1310 dst_reg = AUX_REG; 1311 } else { 1312 EMIT1(0x51); /* push rcx */ 1313 } 1314 /* mov rcx, src_reg */ 1315 EMIT_mov(BPF_REG_4, src_reg); 1316 } 1317 1318 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ 1319 maybe_emit_1mod(&prog, dst_reg, 1320 BPF_CLASS(insn->code) == BPF_ALU64); 1321 1322 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1323 EMIT2(0xD3, add_1reg(b3, dst_reg)); 1324 1325 if (src_reg != BPF_REG_4) { 1326 if (insn->dst_reg == BPF_REG_4) 1327 /* mov dst_reg, r11 */ 1328 EMIT_mov(insn->dst_reg, AUX_REG); 1329 else 1330 EMIT1(0x59); /* pop rcx */ 1331 } 1332 1333 break; 1334 1335 case BPF_ALU | BPF_END | BPF_FROM_BE: 1336 case BPF_ALU64 | BPF_END | BPF_FROM_LE: 1337 switch (imm32) { 1338 case 16: 1339 /* Emit 'ror %ax, 8' to swap lower 2 bytes */ 1340 EMIT1(0x66); 1341 if 
(is_ereg(dst_reg)) 1342 EMIT1(0x41); 1343 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 1344 1345 /* Emit 'movzwl eax, ax' */ 1346 if (is_ereg(dst_reg)) 1347 EMIT3(0x45, 0x0F, 0xB7); 1348 else 1349 EMIT2(0x0F, 0xB7); 1350 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1351 break; 1352 case 32: 1353 /* Emit 'bswap eax' to swap lower 4 bytes */ 1354 if (is_ereg(dst_reg)) 1355 EMIT2(0x41, 0x0F); 1356 else 1357 EMIT1(0x0F); 1358 EMIT1(add_1reg(0xC8, dst_reg)); 1359 break; 1360 case 64: 1361 /* Emit 'bswap rax' to swap 8 bytes */ 1362 EMIT3(add_1mod(0x48, dst_reg), 0x0F, 1363 add_1reg(0xC8, dst_reg)); 1364 break; 1365 } 1366 break; 1367 1368 case BPF_ALU | BPF_END | BPF_FROM_LE: 1369 switch (imm32) { 1370 case 16: 1371 /* 1372 * Emit 'movzwl eax, ax' to zero extend 16-bit 1373 * into 64 bit 1374 */ 1375 if (is_ereg(dst_reg)) 1376 EMIT3(0x45, 0x0F, 0xB7); 1377 else 1378 EMIT2(0x0F, 0xB7); 1379 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1380 break; 1381 case 32: 1382 /* Emit 'mov eax, eax' to clear upper 32-bits */ 1383 if (is_ereg(dst_reg)) 1384 EMIT1(0x45); 1385 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); 1386 break; 1387 case 64: 1388 /* nop */ 1389 break; 1390 } 1391 break; 1392 1393 /* speculation barrier */ 1394 case BPF_ST | BPF_NOSPEC: 1395 EMIT_LFENCE(); 1396 break; 1397 1398 /* ST: *(u8*)(dst_reg + off) = imm */ 1399 case BPF_ST | BPF_MEM | BPF_B: 1400 if (is_ereg(dst_reg)) 1401 EMIT2(0x41, 0xC6); 1402 else 1403 EMIT1(0xC6); 1404 goto st; 1405 case BPF_ST | BPF_MEM | BPF_H: 1406 if (is_ereg(dst_reg)) 1407 EMIT3(0x66, 0x41, 0xC7); 1408 else 1409 EMIT2(0x66, 0xC7); 1410 goto st; 1411 case BPF_ST | BPF_MEM | BPF_W: 1412 if (is_ereg(dst_reg)) 1413 EMIT2(0x41, 0xC7); 1414 else 1415 EMIT1(0xC7); 1416 goto st; 1417 case BPF_ST | BPF_MEM | BPF_DW: 1418 EMIT2(add_1mod(0x48, dst_reg), 0xC7); 1419 1420 st: if (is_imm8(insn->off)) 1421 EMIT2(add_1reg(0x40, dst_reg), insn->off); 1422 else 1423 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); 1424 1425 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); 1426 break; 1427 1428 /* STX: *(u8*)(dst_reg + off) = src_reg */ 1429 case BPF_STX | BPF_MEM | BPF_B: 1430 case BPF_STX | BPF_MEM | BPF_H: 1431 case BPF_STX | BPF_MEM | BPF_W: 1432 case BPF_STX | BPF_MEM | BPF_DW: 1433 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1434 break; 1435 1436 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 1437 case BPF_LDX | BPF_MEM | BPF_B: 1438 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1439 case BPF_LDX | BPF_MEM | BPF_H: 1440 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1441 case BPF_LDX | BPF_MEM | BPF_W: 1442 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1443 case BPF_LDX | BPF_MEM | BPF_DW: 1444 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1445 /* LDXS: dst_reg = *(s8*)(src_reg + off) */ 1446 case BPF_LDX | BPF_MEMSX | BPF_B: 1447 case BPF_LDX | BPF_MEMSX | BPF_H: 1448 case BPF_LDX | BPF_MEMSX | BPF_W: 1449 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: 1450 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: 1451 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: 1452 insn_off = insn->off; 1453 1454 if (BPF_MODE(insn->code) == BPF_PROBE_MEM || 1455 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { 1456 /* Conservatively check that src_reg + insn->off is a kernel address: 1457 * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE 1458 * src_reg is used as scratch for src_reg += insn->off and restored 1459 * after emit_ldx if necessary 1460 */ 1461 1462 u64 limit = TASK_SIZE_MAX + PAGE_SIZE; 1463 u8 *end_of_jmp; 1464 1465 /* At end of these emitted checks, insn->off will have been added 1466 * to src_reg, so no need to 
do relative load with insn->off offset 1467 */ 1468 insn_off = 0; 1469 1470 /* movabsq r11, limit */ 1471 EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG)); 1472 EMIT((u32)limit, 4); 1473 EMIT(limit >> 32, 4); 1474 1475 if (insn->off) { 1476 /* add src_reg, insn->off */ 1477 maybe_emit_1mod(&prog, src_reg, true); 1478 EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off); 1479 } 1480 1481 /* cmp src_reg, r11 */ 1482 maybe_emit_mod(&prog, src_reg, AUX_REG, true); 1483 EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG)); 1484 1485 /* if unsigned '>=', goto load */ 1486 EMIT2(X86_JAE, 0); 1487 end_of_jmp = prog; 1488 1489 /* xor dst_reg, dst_reg */ 1490 emit_mov_imm32(&prog, false, dst_reg, 0); 1491 /* jmp byte_after_ldx */ 1492 EMIT2(0xEB, 0); 1493 1494 /* populate jmp_offset for JAE above to jump to start_of_ldx */ 1495 start_of_ldx = prog; 1496 end_of_jmp[-1] = start_of_ldx - end_of_jmp; 1497 } 1498 if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX || 1499 BPF_MODE(insn->code) == BPF_MEMSX) 1500 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); 1501 else 1502 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); 1503 if (BPF_MODE(insn->code) == BPF_PROBE_MEM || 1504 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { 1505 struct exception_table_entry *ex; 1506 u8 *_insn = image + proglen + (start_of_ldx - temp); 1507 s64 delta; 1508 1509 /* populate jmp_offset for JMP above */ 1510 start_of_ldx[-1] = prog - start_of_ldx; 1511 1512 if (insn->off && src_reg != dst_reg) { 1513 /* sub src_reg, insn->off 1514 * Restore src_reg after "add src_reg, insn->off" in prev 1515 * if statement. But if src_reg == dst_reg, emit_ldx 1516 * above already clobbered src_reg, so no need to restore. 1517 * If add src_reg, insn->off was unnecessary, no need to 1518 * restore either. 1519 */ 1520 maybe_emit_1mod(&prog, src_reg, true); 1521 EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off); 1522 } 1523 1524 if (!bpf_prog->aux->extable) 1525 break; 1526 1527 if (excnt >= bpf_prog->aux->num_exentries) { 1528 pr_err("ex gen bug\n"); 1529 return -EFAULT; 1530 } 1531 ex = &bpf_prog->aux->extable[excnt++]; 1532 1533 delta = _insn - (u8 *)&ex->insn; 1534 if (!is_simm32(delta)) { 1535 pr_err("extable->insn doesn't fit into 32-bit\n"); 1536 return -EFAULT; 1537 } 1538 /* switch ex to rw buffer for writes */ 1539 ex = (void *)rw_image + ((void *)ex - (void *)image); 1540 1541 ex->insn = delta; 1542 1543 ex->data = EX_TYPE_BPF; 1544 1545 if (dst_reg > BPF_REG_9) { 1546 pr_err("verifier error\n"); 1547 return -EFAULT; 1548 } 1549 /* 1550 * Compute size of x86 insn and its target dest x86 register. 1551 * ex_handler_bpf() will use lower 8 bits to adjust 1552 * pt_regs->ip to jump over this x86 instruction 1553 * and upper bits to figure out which pt_regs to zero out. 1554 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" 1555 * of 4 bytes will be ignored and rbx will be zero inited. 1556 */ 1557 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); 1558 } 1559 break; 1560 1561 case BPF_STX | BPF_ATOMIC | BPF_W: 1562 case BPF_STX | BPF_ATOMIC | BPF_DW: 1563 if (insn->imm == (BPF_AND | BPF_FETCH) || 1564 insn->imm == (BPF_OR | BPF_FETCH) || 1565 insn->imm == (BPF_XOR | BPF_FETCH)) { 1566 bool is64 = BPF_SIZE(insn->code) == BPF_DW; 1567 u32 real_src_reg = src_reg; 1568 u32 real_dst_reg = dst_reg; 1569 u8 *branch_target; 1570 1571 /* 1572 * Can't be implemented with a single x86 insn. 1573 * Need to do a CMPXCHG loop. 
1574 */ 1575 1576 /* Will need RAX as a CMPXCHG operand so save R0 */ 1577 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 1578 if (src_reg == BPF_REG_0) 1579 real_src_reg = BPF_REG_AX; 1580 if (dst_reg == BPF_REG_0) 1581 real_dst_reg = BPF_REG_AX; 1582 1583 branch_target = prog; 1584 /* Load old value */ 1585 emit_ldx(&prog, BPF_SIZE(insn->code), 1586 BPF_REG_0, real_dst_reg, insn->off); 1587 /* 1588 * Perform the (commutative) operation locally, 1589 * put the result in the AUX_REG. 1590 */ 1591 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); 1592 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); 1593 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], 1594 add_2reg(0xC0, AUX_REG, real_src_reg)); 1595 /* Attempt to swap in new value */ 1596 err = emit_atomic(&prog, BPF_CMPXCHG, 1597 real_dst_reg, AUX_REG, 1598 insn->off, 1599 BPF_SIZE(insn->code)); 1600 if (WARN_ON(err)) 1601 return err; 1602 /* 1603 * ZF tells us whether we won the race. If it's 1604 * cleared we need to try again. 1605 */ 1606 EMIT2(X86_JNE, -(prog - branch_target) - 2); 1607 /* Return the pre-modification value */ 1608 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); 1609 /* Restore R0 after clobbering RAX */ 1610 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 1611 break; 1612 } 1613 1614 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, 1615 insn->off, BPF_SIZE(insn->code)); 1616 if (err) 1617 return err; 1618 break; 1619 1620 /* call */ 1621 case BPF_JMP | BPF_CALL: { 1622 int offs; 1623 1624 func = (u8 *) __bpf_call_base + imm32; 1625 if (tail_call_reachable) { 1626 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ 1627 EMIT3_off32(0x48, 0x8B, 0x85, 1628 -round_up(bpf_prog->aux->stack_depth, 8) - 8); 1629 if (!imm32) 1630 return -EINVAL; 1631 offs = 7 + x86_call_depth_emit_accounting(&prog, func); 1632 } else { 1633 if (!imm32) 1634 return -EINVAL; 1635 offs = x86_call_depth_emit_accounting(&prog, func); 1636 } 1637 if (emit_call(&prog, func, image + addrs[i - 1] + offs)) 1638 return -EINVAL; 1639 break; 1640 } 1641 1642 case BPF_JMP | BPF_TAIL_CALL: 1643 if (imm32) 1644 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], 1645 &prog, image + addrs[i - 1], 1646 callee_regs_used, 1647 bpf_prog->aux->stack_depth, 1648 ctx); 1649 else 1650 emit_bpf_tail_call_indirect(&prog, 1651 callee_regs_used, 1652 bpf_prog->aux->stack_depth, 1653 image + addrs[i - 1], 1654 ctx); 1655 break; 1656 1657 /* cond jump */ 1658 case BPF_JMP | BPF_JEQ | BPF_X: 1659 case BPF_JMP | BPF_JNE | BPF_X: 1660 case BPF_JMP | BPF_JGT | BPF_X: 1661 case BPF_JMP | BPF_JLT | BPF_X: 1662 case BPF_JMP | BPF_JGE | BPF_X: 1663 case BPF_JMP | BPF_JLE | BPF_X: 1664 case BPF_JMP | BPF_JSGT | BPF_X: 1665 case BPF_JMP | BPF_JSLT | BPF_X: 1666 case BPF_JMP | BPF_JSGE | BPF_X: 1667 case BPF_JMP | BPF_JSLE | BPF_X: 1668 case BPF_JMP32 | BPF_JEQ | BPF_X: 1669 case BPF_JMP32 | BPF_JNE | BPF_X: 1670 case BPF_JMP32 | BPF_JGT | BPF_X: 1671 case BPF_JMP32 | BPF_JLT | BPF_X: 1672 case BPF_JMP32 | BPF_JGE | BPF_X: 1673 case BPF_JMP32 | BPF_JLE | BPF_X: 1674 case BPF_JMP32 | BPF_JSGT | BPF_X: 1675 case BPF_JMP32 | BPF_JSLT | BPF_X: 1676 case BPF_JMP32 | BPF_JSGE | BPF_X: 1677 case BPF_JMP32 | BPF_JSLE | BPF_X: 1678 /* cmp dst_reg, src_reg */ 1679 maybe_emit_mod(&prog, dst_reg, src_reg, 1680 BPF_CLASS(insn->code) == BPF_JMP); 1681 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); 1682 goto emit_cond_jmp; 1683 1684 case BPF_JMP | BPF_JSET | BPF_X: 1685 case BPF_JMP32 | BPF_JSET | BPF_X: 1686 /* test dst_reg, src_reg */ 1687 maybe_emit_mod(&prog, dst_reg, 
src_reg, 1688 BPF_CLASS(insn->code) == BPF_JMP); 1689 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); 1690 goto emit_cond_jmp; 1691 1692 case BPF_JMP | BPF_JSET | BPF_K: 1693 case BPF_JMP32 | BPF_JSET | BPF_K: 1694 /* test dst_reg, imm32 */ 1695 maybe_emit_1mod(&prog, dst_reg, 1696 BPF_CLASS(insn->code) == BPF_JMP); 1697 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 1698 goto emit_cond_jmp; 1699 1700 case BPF_JMP | BPF_JEQ | BPF_K: 1701 case BPF_JMP | BPF_JNE | BPF_K: 1702 case BPF_JMP | BPF_JGT | BPF_K: 1703 case BPF_JMP | BPF_JLT | BPF_K: 1704 case BPF_JMP | BPF_JGE | BPF_K: 1705 case BPF_JMP | BPF_JLE | BPF_K: 1706 case BPF_JMP | BPF_JSGT | BPF_K: 1707 case BPF_JMP | BPF_JSLT | BPF_K: 1708 case BPF_JMP | BPF_JSGE | BPF_K: 1709 case BPF_JMP | BPF_JSLE | BPF_K: 1710 case BPF_JMP32 | BPF_JEQ | BPF_K: 1711 case BPF_JMP32 | BPF_JNE | BPF_K: 1712 case BPF_JMP32 | BPF_JGT | BPF_K: 1713 case BPF_JMP32 | BPF_JLT | BPF_K: 1714 case BPF_JMP32 | BPF_JGE | BPF_K: 1715 case BPF_JMP32 | BPF_JLE | BPF_K: 1716 case BPF_JMP32 | BPF_JSGT | BPF_K: 1717 case BPF_JMP32 | BPF_JSLT | BPF_K: 1718 case BPF_JMP32 | BPF_JSGE | BPF_K: 1719 case BPF_JMP32 | BPF_JSLE | BPF_K: 1720 /* test dst_reg, dst_reg to save one extra byte */ 1721 if (imm32 == 0) { 1722 maybe_emit_mod(&prog, dst_reg, dst_reg, 1723 BPF_CLASS(insn->code) == BPF_JMP); 1724 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 1725 goto emit_cond_jmp; 1726 } 1727 1728 /* cmp dst_reg, imm8/32 */ 1729 maybe_emit_1mod(&prog, dst_reg, 1730 BPF_CLASS(insn->code) == BPF_JMP); 1731 1732 if (is_imm8(imm32)) 1733 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 1734 else 1735 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 1736 1737 emit_cond_jmp: /* Convert BPF opcode to x86 */ 1738 switch (BPF_OP(insn->code)) { 1739 case BPF_JEQ: 1740 jmp_cond = X86_JE; 1741 break; 1742 case BPF_JSET: 1743 case BPF_JNE: 1744 jmp_cond = X86_JNE; 1745 break; 1746 case BPF_JGT: 1747 /* GT is unsigned '>', JA in x86 */ 1748 jmp_cond = X86_JA; 1749 break; 1750 case BPF_JLT: 1751 /* LT is unsigned '<', JB in x86 */ 1752 jmp_cond = X86_JB; 1753 break; 1754 case BPF_JGE: 1755 /* GE is unsigned '>=', JAE in x86 */ 1756 jmp_cond = X86_JAE; 1757 break; 1758 case BPF_JLE: 1759 /* LE is unsigned '<=', JBE in x86 */ 1760 jmp_cond = X86_JBE; 1761 break; 1762 case BPF_JSGT: 1763 /* Signed '>', GT in x86 */ 1764 jmp_cond = X86_JG; 1765 break; 1766 case BPF_JSLT: 1767 /* Signed '<', LT in x86 */ 1768 jmp_cond = X86_JL; 1769 break; 1770 case BPF_JSGE: 1771 /* Signed '>=', GE in x86 */ 1772 jmp_cond = X86_JGE; 1773 break; 1774 case BPF_JSLE: 1775 /* Signed '<=', LE in x86 */ 1776 jmp_cond = X86_JLE; 1777 break; 1778 default: /* to silence GCC warning */ 1779 return -EFAULT; 1780 } 1781 jmp_offset = addrs[i + insn->off] - addrs[i]; 1782 if (is_imm8(jmp_offset)) { 1783 if (jmp_padding) { 1784 /* To keep the jmp_offset valid, the extra bytes are 1785 * padded before the jump insn, so we subtract the 1786 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. 1787 * 1788 * If the previous pass already emits an imm8 1789 * jmp_cond, then this BPF insn won't shrink, so 1790 * "nops" is 0. 1791 * 1792 * On the other hand, if the previous pass emits an 1793 * imm32 jmp_cond, the extra 4 bytes(*) is padded to 1794 * keep the image from shrinking further. 1795 * 1796 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond 1797 * is 2 bytes, so the size difference is 4 bytes. 
1798 */ 1799 nops = INSN_SZ_DIFF - 2; 1800 if (nops != 0 && nops != 4) { 1801 pr_err("unexpected jmp_cond padding: %d bytes\n", 1802 nops); 1803 return -EFAULT; 1804 } 1805 emit_nops(&prog, nops); 1806 } 1807 EMIT2(jmp_cond, jmp_offset); 1808 } else if (is_simm32(jmp_offset)) { 1809 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 1810 } else { 1811 pr_err("cond_jmp gen bug %llx\n", jmp_offset); 1812 return -EFAULT; 1813 } 1814 1815 break; 1816 1817 case BPF_JMP | BPF_JA: 1818 case BPF_JMP32 | BPF_JA: 1819 if (BPF_CLASS(insn->code) == BPF_JMP) { 1820 if (insn->off == -1) 1821 /* -1 jmp instructions will always jump 1822 * backwards two bytes. Explicitly handling 1823 * this case avoids wasting too many passes 1824 * when there are long sequences of replaced 1825 * dead code. 1826 */ 1827 jmp_offset = -2; 1828 else 1829 jmp_offset = addrs[i + insn->off] - addrs[i]; 1830 } else { 1831 if (insn->imm == -1) 1832 jmp_offset = -2; 1833 else 1834 jmp_offset = addrs[i + insn->imm] - addrs[i]; 1835 } 1836 1837 if (!jmp_offset) { 1838 /* 1839 * If jmp_padding is enabled, the extra nops will 1840 * be inserted. Otherwise, optimize out nop jumps. 1841 */ 1842 if (jmp_padding) { 1843 /* There are 3 possible conditions. 1844 * (1) This BPF_JA is already optimized out in 1845 * the previous run, so there is no need 1846 * to pad any extra byte (0 byte). 1847 * (2) The previous pass emits an imm8 jmp, 1848 * so we pad 2 bytes to match the previous 1849 * insn size. 1850 * (3) Similarly, the previous pass emits an 1851 * imm32 jmp, and 5 bytes is padded. 1852 */ 1853 nops = INSN_SZ_DIFF; 1854 if (nops != 0 && nops != 2 && nops != 5) { 1855 pr_err("unexpected nop jump padding: %d bytes\n", 1856 nops); 1857 return -EFAULT; 1858 } 1859 emit_nops(&prog, nops); 1860 } 1861 break; 1862 } 1863 emit_jmp: 1864 if (is_imm8(jmp_offset)) { 1865 if (jmp_padding) { 1866 /* To avoid breaking jmp_offset, the extra bytes 1867 * are padded before the actual jmp insn, so 1868 * 2 bytes is subtracted from INSN_SZ_DIFF. 1869 * 1870 * If the previous pass already emits an imm8 1871 * jmp, there is nothing to pad (0 byte). 1872 * 1873 * If it emits an imm32 jmp (5 bytes) previously 1874 * and now an imm8 jmp (2 bytes), then we pad 1875 * (5 - 2 = 3) bytes to stop the image from 1876 * shrinking further. 1877 */ 1878 nops = INSN_SZ_DIFF - 2; 1879 if (nops != 0 && nops != 3) { 1880 pr_err("unexpected jump padding: %d bytes\n", 1881 nops); 1882 return -EFAULT; 1883 } 1884 emit_nops(&prog, INSN_SZ_DIFF - 2); 1885 } 1886 EMIT2(0xEB, jmp_offset); 1887 } else if (is_simm32(jmp_offset)) { 1888 EMIT1_off32(0xE9, jmp_offset); 1889 } else { 1890 pr_err("jmp gen bug %llx\n", jmp_offset); 1891 return -EFAULT; 1892 } 1893 break; 1894 1895 case BPF_JMP | BPF_EXIT: 1896 if (seen_exit) { 1897 jmp_offset = ctx->cleanup_addr - addrs[i]; 1898 goto emit_jmp; 1899 } 1900 seen_exit = true; 1901 /* Update cleanup_addr */ 1902 ctx->cleanup_addr = proglen; 1903 pop_callee_regs(&prog, callee_regs_used); 1904 EMIT1(0xC9); /* leave */ 1905 emit_return(&prog, image + addrs[i - 1] + (prog - temp)); 1906 break; 1907 1908 default: 1909 /* 1910 * By design x86-64 JIT should support all BPF instructions. 1911 * This error will be seen if new instruction was added 1912 * to the interpreter, but not to the JIT, or if there is 1913 * junk in bpf_prog. 
1914 */ 1915 pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 1916 return -EINVAL; 1917 } 1918 1919 ilen = prog - temp; 1920 if (ilen > BPF_MAX_INSN_SIZE) { 1921 pr_err("bpf_jit: fatal insn size error\n"); 1922 return -EFAULT; 1923 } 1924 1925 if (image) { 1926 /* 1927 * When populating the image, assert that: 1928 * 1929 * i) We do not write beyond the allocated space, and 1930 * ii) addrs[i] did not change from the prior run, in order 1931 * to validate assumptions made for computing branch 1932 * displacements. 1933 */ 1934 if (unlikely(proglen + ilen > oldproglen || 1935 proglen + ilen != addrs[i])) { 1936 pr_err("bpf_jit: fatal error\n"); 1937 return -EFAULT; 1938 } 1939 memcpy(rw_image + proglen, temp, ilen); 1940 } 1941 proglen += ilen; 1942 addrs[i] = proglen; 1943 prog = temp; 1944 } 1945 1946 if (image && excnt != bpf_prog->aux->num_exentries) { 1947 pr_err("extable is not populated\n"); 1948 return -EFAULT; 1949 } 1950 return proglen; 1951 } 1952 1953 static void clean_stack_garbage(const struct btf_func_model *m, 1954 u8 **pprog, int nr_stack_slots, 1955 int stack_size) 1956 { 1957 int arg_size, off; 1958 u8 *prog; 1959 1960 /* Generally speaking, the compiler will pass the arguments 1961 * on-stack with "push" instruction, which will take 8-byte 1962 * on the stack. In this case, there won't be garbage values 1963 * while we copy the arguments from origin stack frame to current 1964 * in BPF_DW. 1965 * 1966 * However, sometimes the compiler will only allocate 4-byte on 1967 * the stack for the arguments. For now, this case will only 1968 * happen if there is only one argument on-stack and its size 1969 * not more than 4 byte. In this case, there will be garbage 1970 * values on the upper 4-byte where we store the argument on 1971 * current stack frame. 1972 * 1973 * arguments on origin stack: 1974 * 1975 * stack_arg_1(4-byte) xxx(4-byte) 1976 * 1977 * what we copy: 1978 * 1979 * stack_arg_1(8-byte): stack_arg_1(origin) xxx 1980 * 1981 * and the xxx is the garbage values which we should clean here. 1982 */ 1983 if (nr_stack_slots != 1) 1984 return; 1985 1986 /* the size of the last argument */ 1987 arg_size = m->arg_size[m->nr_args - 1]; 1988 if (arg_size <= 4) { 1989 off = -(stack_size - 4); 1990 prog = *pprog; 1991 /* mov DWORD PTR [rbp + off], 0 */ 1992 if (!is_imm8(off)) 1993 EMIT2_off32(0xC7, 0x85, off); 1994 else 1995 EMIT3(0xC7, 0x45, off); 1996 EMIT(0, 4); 1997 *pprog = prog; 1998 } 1999 } 2000 2001 /* get the count of the regs that are used to pass arguments */ 2002 static int get_nr_used_regs(const struct btf_func_model *m) 2003 { 2004 int i, arg_regs, nr_used_regs = 0; 2005 2006 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { 2007 arg_regs = (m->arg_size[i] + 7) / 8; 2008 if (nr_used_regs + arg_regs <= 6) 2009 nr_used_regs += arg_regs; 2010 2011 if (nr_used_regs >= 6) 2012 break; 2013 } 2014 2015 return nr_used_regs; 2016 } 2017 2018 static void save_args(const struct btf_func_model *m, u8 **prog, 2019 int stack_size, bool for_call_origin) 2020 { 2021 int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0; 2022 int i, j; 2023 2024 /* Store function arguments to stack. 
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
		arg_regs = (m->arg_size[i] + 7) / 8;

		/* According to the research of Yonghong, struct members
		 * should be all in register or all on the stack.
		 * Meanwhile, the compiler will pass the argument in regs
		 * if the remaining regs can hold the argument.
		 *
		 * Disorder of the args can happen. For example:
		 *
		 * struct foo_struct {
		 *     long a;
		 *     int b;
		 * };
		 * int foo(char, char, char, char, char, struct foo_struct,
		 *         char);
		 *
		 * arg1-arg5 and arg7 will be passed in regs, and arg6 will
		 * be passed on the stack.
		 */
		if (nr_regs + arg_regs > 6) {
			/* copy function arguments from the origin stack frame
			 * into the current stack frame.
			 *
			 * The starting address of the arguments on-stack
			 * is:
			 *   rbp + 8(push rbp) +
			 *   8(return addr of origin call) +
			 *   8(return addr of the caller)
			 * which means: rbp + 24
			 */
			for (j = 0; j < arg_regs; j++) {
				emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
					 nr_stack_slots * 8 + 0x18);
				emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
					 -stack_size);

				if (!nr_stack_slots)
					first_off = stack_size;
				stack_size -= 8;
				nr_stack_slots++;
			}
		} else {
			/* Only copy the arguments on-stack to the current
			 * 'stack_size' and ignore the regs; this is used to
			 * prepare the on-stack arguments for the origin call.
			 */
			if (for_call_origin) {
				nr_regs += arg_regs;
				continue;
			}

			/* copy the arguments from regs into stack */
			for (j = 0; j < arg_regs; j++) {
				emit_stx(prog, BPF_DW, BPF_REG_FP,
					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
					 -stack_size);
				stack_size -= 8;
				nr_regs++;
			}
		}
	}

	clean_stack_garbage(m, prog, nr_stack_slots, first_off);
}

static void restore_regs(const struct btf_func_model *m, u8 **prog,
			 int stack_size)
{
	int i, j, arg_regs, nr_regs = 0;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 *
	 * The logic here is similar to what we do in save_args().
	 */
	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
		arg_regs = (m->arg_size[i] + 7) / 8;
		if (nr_regs + arg_regs <= 6) {
			for (j = 0; j < arg_regs; j++) {
				emit_ldx(prog, BPF_DW,
					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
					 BPF_REG_FP,
					 -stack_size);
				stack_size -= 8;
				nr_regs++;
			}
		} else {
			stack_size -= 8 * arg_regs;
		}

		if (nr_regs >= 6)
			break;
	}
}

static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_tramp_link *l, int stack_size,
			   int run_ctx_off, bool save_ret)
{
	u8 *prog = *pprog;
	u8 *jmp_insn;
	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
	struct bpf_prog *p = l->link.prog;
	u64 cookie = l->cookie;

	/* mov rdi, cookie */
	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);

	/* Prepare struct bpf_tramp_run_ctx.
	 *
	 * bpf_tramp_run_ctx is already preserved by
	 * arch_prepare_bpf_trampoline().
	 *
	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
	 */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: lea rsi, [rbp - run_ctx_off] */
	if (!is_imm8(-run_ctx_off))
		EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
	else
		EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog))
		return -EINVAL;
	/* remember prog start time returned by __bpf_prog_enter */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

	/* if (__bpf_prog_enter*(prog) == 0)
	 *	goto skip_exec_of_prog;
	 */
	EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
	/* emit 2 nops that will be replaced with JE insn */
	jmp_insn = prog;
	emit_nops(&prog, 2);

	/* arg1: lea rdi, [rbp - stack_size] */
	if (!is_imm8(-stack_size))
		EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
	else
		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
	/* arg2: progs[i]->insnsi for interpreter */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);
	/* call JITed bpf program or interpreter */
	if (emit_rsb_call(&prog, p->bpf_func, prog))
		return -EINVAL;

	/*
	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
	 * value of the previous call, which is then passed on the stack
	 * to the next BPF program.
	 *
	 * BPF_TRAMP_FENTRY trampoline may need to return the return
	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
	 */
	if (save_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	/* replace 2 nops with JE insn, since jmp target is known */
	jmp_insn[0] = X86_JE;
	jmp_insn[1] = prog - jmp_insn - 2;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: mov rsi, rbx <- start time in nsec */
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	/* arg3: lea rdx, [rbp - run_ctx_off] */
	if (!is_imm8(-run_ctx_off))
		EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
	else
		EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog))
		return -EINVAL;

	*pprog = prog;
	return 0;
}

static void emit_align(u8 **pprog, u32 align)
{
	u8 *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	if (target != prog)
		emit_nops(&prog, target - prog);

	*pprog = prog;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}

static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_links *tl, int stack_size,
		      int run_ctx_off, bool save_ret)
{
	int i;
	u8 *prog = *pprog;

	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
				    run_ctx_off, save_ret))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}

static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_links *tl, int stack_size,
			      int run_ctx_off, u8 **branches)
{
	u8 *prog = *pprog;
	int i;

	/* The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) != 0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump). These
		 * nops are replaced with a conditional jump once do_fexit
		 * (i.e. the start of the fexit invocation) is finalized.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}

/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has a 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
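 *
 * When an fexit program is attached, the trampoline is built with
 * BPF_TRAMP_F_CALL_ORIG and BPF_TRAMP_F_SKIP_FRAME set (see the flag
 * handling below): the trampoline itself calls the body of eth_type_trans
 * and then skips the original frame, so it returns straight to the caller
 * of eth_type_trans.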
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *func_addr)
{
	int i, ret, nr_regs = m->nr_args, stack_size = 0;
	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	void *orig_call = func_addr;
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;

	/* extra registers for struct arguments */
	for (i = 0; i < m->nr_args; i++)
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
			nr_regs += (m->arg_size[i] + 7) / 8 - 1;

	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. Arguments 1-6
	 * are passed through regs, the rest are passed on the stack.
	 */
	if (nr_regs > MAX_BPF_FUNC_ARGS)
		return -ENOTSUPP;

	/* Generated trampoline stack layout:
	 *
	 * RBP + 8         [ return address  ]
	 * RBP + 0         [ RBP             ]
	 *
	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
	 *
	 *                 [ reg_argN        ]  always
	 *                 [ ...             ]
	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
	 *
	 * RBP - nregs_off [ regs count      ]  always
	 *
	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
	 *
	 * RBP - rbx_off   [ rbx value       ]  always
	 *
	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
	 *
	 *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
	 *                     [ ...        ]
	 *                     [ stack_arg2 ]
	 * RBP - arg_stack_off [ stack_arg1 ]
	 */

	/* room for return value of orig_call or fentry prog */
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	stack_size += nr_regs * 8;
	regs_off = stack_size;

	/* regs count */
	stack_size += 8;
	nregs_off = stack_size;

	if (flags & BPF_TRAMP_F_IP_ARG)
		stack_size += 8; /* room for IP address argument */

	ip_off = stack_size;

	stack_size += 8;
	rbx_off = stack_size;

	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
	run_ctx_off = stack_size;

	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
		/* the space used to pass arguments on the stack */
		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
		/* make sure the stack pointer is 16-byte aligned if we
		 * need to pass arguments on the stack, which means that
		 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
		 * should be 16-byte aligned. The following code depends
		 * on stack_size already being 8-byte aligned.
		 */
		stack_size += (stack_size % 16) ? 0 : 8;
	}

	arg_stack_off = stack_size;

	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		if (is_endbr(*(u32 *)orig_call))
			orig_call += ENDBR_INSN_SIZE;
		orig_call += X86_PATCH_SIZE;
	}

	prog = image;

	EMIT_ENDBR();
	/*
	 * This is the direct-call trampoline, as such it needs accounting
	 * for the __fentry__ call.
	 */
	x86_call_depth_emit_accounting(&prog, NULL);
	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	if (!is_imm8(stack_size))
		/* sub rsp, stack_size */
		EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
	else
		/* sub rsp, stack_size */
		EMIT4(0x48, 0x83, 0xEC, stack_size);
	/* mov QWORD PTR [rbp - rbx_off], rbx */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);

	/* Store number of argument registers of the traced function:
	 *   mov rax, nr_regs
	 *   mov QWORD PTR [rbp - nregs_off], rax
	 */
	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* Store IP address of the traced function:
		 *   movabsq rax, func_addr
		 *   mov QWORD PTR [rbp - ip_off], rax
		 */
		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
	}

	save_args(m, &prog, regs_off, false);

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (fentry->nr_links)
		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_links) {
		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
				       run_ctx_off, branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_regs(m, &prog, regs_off);
		save_args(m, &prog, arg_stack_off, true);

		if (flags & BPF_TRAMP_F_ORIG_STACK) {
			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
			EMIT2(0xff, 0xd0); /* call *rax */
		} else {
			/* call original function */
			if (emit_rsb_call(&prog, orig_call, prog)) {
				ret = -EINVAL;
				goto cleanup;
			}
		}
		/* remember return value on the stack for the bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
		im->ip_after_call = prog;
		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
		prog += X86_PATCH_SIZE;
	}

	if (fmod_ret->nr_links) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_links; i++)
			emit_cond_near_jump(&branches[i], prog, branches[i],
					    X86_JNE);
	}

	if (fexit->nr_links)
		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, regs_off);

	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = prog;
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	/* restore return value of orig_call or fentry prog back into RAX */
	if (save_ret)
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	emit_return(&prog, prog);
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)image;

cleanup:
	kfree(branches);
	return ret;
}

static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], image + (prog - buf),
					  X86_JE);
		if (err)
			return err;

		emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
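	 *
	 * For example, for four sorted addresses p0 < p1 < p2 < p3 the
	 * emitted code looks roughly like this (16-byte alignment nops
	 * omitted), where a non-matching rdx falls through to an indirect
	 * (retpoline-safe) jump through rdx:
	 *
	 *	cmp rdx, p1
	 *	jg  upper		// rdx > p1: search {p2, p3}
	 *	cmp rdx, p0
	 *	jg  1f
	 *	cmp rdx, p0
	 *	je  p0
	 *	jmp rdx
	 * 1:	cmp rdx, p1
	 *	je  p1
	 *	jmp rdx
	 * upper:
	 *	cmp rdx, p2
	 *	jg  2f
	 *	cmp rdx, p2
	 *	je  p2
	 *	jmp rdx
	 * 2:	cmp rdx, p3
	 *	je  p3
	 *	jmp rdx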
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {			/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs, image, buf);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs, image, buf);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
{
	u8 *prog = buf;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
}

struct x64_jit_data {
	struct bpf_binary_header *rw_header;
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *rw_header = NULL;
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *rw_image = NULL;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		rw_header = jit_data->rw_header;
		rw_image = (void *)rw_header + ((void *)image - (void *)header);
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before the first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to fewer than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such a case, do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header) {
				bpf_arch_text_copy(&header->size, &rw_header->size,
						   sizeof(rw_header->size));
				bpf_jit_binary_pack_free(header, rw_header);
			}
			/* Fall back to interpreter mode */
			prog = orig_prog;
			if (extra_pass) {
				prog->bpf_func = NULL;
				prog->jited = 0;
				prog->jited_len = 0;
			}
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
							   &image, align, &rw_header, &rw_image,
							   jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			/*
			 * bpf_jit_binary_pack_finalize fails in two scenarios:
			 *   1) header is not pointing to proper module memory;
			 *   2) the arch doesn't support bpf_arch_text_copy().
			 *
			 * Both cases are serious bugs and justify WARN_ON.
			 */
			if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
				/* header has been freed */
				header = NULL;
				goto out_image;
			}

			bpf_tail_call_direct_fixup(prog);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
			jit_data->rw_header = rw_header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	if (text_poke_copy(dst, src, len) == NULL)
		return ERR_PTR(-EINVAL);
	return dst;
}

/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct x64_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(prog, jit_data->header,
						     jit_data->rw_header);
			kvfree(jit_data->addrs);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}