1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * BPF Jit compiler for s390. 4 * 5 * Minimum build requirements: 6 * 7 * - HAVE_MARCH_Z196_FEATURES: laal, laalg 8 * - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj 9 * - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf 10 * - 64BIT 11 * 12 * Copyright IBM Corp. 2012,2015 13 * 14 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 15 * Michael Holzheu <holzheu@linux.vnet.ibm.com> 16 */ 17 18 #define KMSG_COMPONENT "bpf_jit" 19 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 20 21 #include <linux/netdevice.h> 22 #include <linux/filter.h> 23 #include <linux/init.h> 24 #include <linux/bpf.h> 25 #include <linux/mm.h> 26 #include <linux/kernel.h> 27 #include <asm/cacheflush.h> 28 #include <asm/extable.h> 29 #include <asm/dis.h> 30 #include <asm/facility.h> 31 #include <asm/nospec-branch.h> 32 #include <asm/set_memory.h> 33 #include <asm/text-patching.h> 34 #include <asm/unwind.h> 35 36 struct bpf_jit { 37 u32 seen; /* Flags to remember seen eBPF instructions */ 38 u16 seen_regs; /* Mask to remember which registers are used */ 39 u32 *addrs; /* Array with relative instruction addresses */ 40 u8 *prg_buf; /* Start of program */ 41 int size; /* Size of program and literal pool */ 42 int size_prg; /* Size of program */ 43 int prg; /* Current position in program */ 44 int lit32_start; /* Start of 32-bit literal pool */ 45 int lit32; /* Current position in 32-bit literal pool */ 46 int lit64_start; /* Start of 64-bit literal pool */ 47 int lit64; /* Current position in 64-bit literal pool */ 48 int base_ip; /* Base address for literal pool */ 49 int exit_ip; /* Address of exit */ 50 int tail_call_start; /* Tail call start offset */ 51 int excnt; /* Number of exception table entries */ 52 int prologue_plt_ret; /* Return address for prologue hotpatch PLT */ 53 int prologue_plt; /* Start of prologue hotpatch PLT */ 54 int kern_arena; /* Pool offset of kernel arena address */ 55 u64 user_arena; /* User arena address */ 56 u32 frame_off; /* Offset of struct bpf_prog from %r15 */ 57 }; 58 59 #define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ 60 #define SEEN_LITERAL BIT(1) /* code uses literals */ 61 #define SEEN_FUNC BIT(2) /* calls C functions */ 62 #define SEEN_STACK (SEEN_FUNC | SEEN_MEM) 63 64 #define NVREGS 0xffc0 /* %r6-%r15 */ 65 66 /* 67 * s390 registers 68 */ 69 #define REG_W0 (MAX_BPF_JIT_REG + 0) /* Work register 1 (even) */ 70 #define REG_W1 (MAX_BPF_JIT_REG + 1) /* Work register 2 (odd) */ 71 #define REG_L (MAX_BPF_JIT_REG + 2) /* Literal pool register */ 72 #define REG_15 (MAX_BPF_JIT_REG + 3) /* Register 15 */ 73 #define REG_0 REG_W0 /* Register 0 */ 74 #define REG_1 REG_W1 /* Register 1 */ 75 #define REG_2 BPF_REG_1 /* Register 2 */ 76 #define REG_3 BPF_REG_2 /* Register 3 */ 77 #define REG_4 BPF_REG_3 /* Register 4 */ 78 #define REG_7 BPF_REG_6 /* Register 7 */ 79 #define REG_8 BPF_REG_7 /* Register 8 */ 80 #define REG_14 BPF_REG_0 /* Register 14 */ 81 82 /* 83 * Mapping of BPF registers to s390 registers 84 */ 85 static const int reg2hex[] = { 86 /* Return code */ 87 [BPF_REG_0] = 14, 88 /* Function parameters */ 89 [BPF_REG_1] = 2, 90 [BPF_REG_2] = 3, 91 [BPF_REG_3] = 4, 92 [BPF_REG_4] = 5, 93 [BPF_REG_5] = 6, 94 /* Call saved registers */ 95 [BPF_REG_6] = 7, 96 [BPF_REG_7] = 8, 97 [BPF_REG_8] = 9, 98 [BPF_REG_9] = 10, 99 /* BPF stack pointer */ 100 [BPF_REG_FP] = 13, 101 /* Register for blinding */ 102 [BPF_REG_AX] = 12, 103 /* Work registers for s390x backend */ 104 [REG_W0] = 0, 105 [REG_W1] = 1, 106 [REG_L] = 11, 107 [REG_15] = 15, 108 }; 109 110 static inline u32 reg(u32 dst_reg, u32 src_reg) 111 { 112 return reg2hex[dst_reg] << 4 | reg2hex[src_reg]; 113 } 114 115 static inline u32 reg_high(u32 reg) 116 { 117 return reg2hex[reg] << 4; 118 } 119 120 static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) 121 { 122 u32 r1 = reg2hex[b1]; 123 124 if (r1 >= 6 && r1 <= 15) 125 jit->seen_regs |= (1 << r1); 126 } 127 128 static s32 off_to_pcrel(struct bpf_jit *jit, u32 off) 129 { 130 return off - jit->prg; 131 } 132 133 static s64 ptr_to_pcrel(struct bpf_jit *jit, const void *ptr) 134 { 135 if (jit->prg_buf) 136 return (const u8 *)ptr - ((const u8 *)jit->prg_buf + jit->prg); 137 return 0; 138 } 139 140 #define REG_SET_SEEN(b1) \ 141 ({ \ 142 reg_set_seen(jit, b1); \ 143 }) 144 145 /* 146 * EMIT macros for code generation 147 */ 148 149 #define _EMIT2(op) \ 150 ({ \ 151 if (jit->prg_buf) \ 152 *(u16 *) (jit->prg_buf + jit->prg) = (op); \ 153 jit->prg += 2; \ 154 }) 155 156 #define EMIT2(op, b1, b2) \ 157 ({ \ 158 _EMIT2((op) | reg(b1, b2)); \ 159 REG_SET_SEEN(b1); \ 160 REG_SET_SEEN(b2); \ 161 }) 162 163 #define _EMIT4(op) \ 164 ({ \ 165 if (jit->prg_buf) \ 166 *(u32 *) (jit->prg_buf + jit->prg) = (op); \ 167 jit->prg += 4; \ 168 }) 169 170 #define EMIT4(op, b1, b2) \ 171 ({ \ 172 _EMIT4((op) | reg(b1, b2)); \ 173 REG_SET_SEEN(b1); \ 174 REG_SET_SEEN(b2); \ 175 }) 176 177 #define EMIT4_RRF(op, b1, b2, b3) \ 178 ({ \ 179 _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \ 180 REG_SET_SEEN(b1); \ 181 REG_SET_SEEN(b2); \ 182 REG_SET_SEEN(b3); \ 183 }) 184 185 #define _EMIT4_DISP(op, disp) \ 186 ({ \ 187 unsigned int __disp = (disp) & 0xfff; \ 188 _EMIT4((op) | __disp); \ 189 }) 190 191 #define EMIT4_DISP(op, b1, b2, disp) \ 192 ({ \ 193 _EMIT4_DISP((op) | reg_high(b1) << 16 | \ 194 reg_high(b2) << 8, (disp)); \ 195 REG_SET_SEEN(b1); \ 196 REG_SET_SEEN(b2); \ 197 }) 198 199 #define EMIT4_IMM(op, b1, imm) \ 200 ({ \ 201 unsigned int __imm = (imm) & 0xffff; \ 202 _EMIT4((op) | reg_high(b1) << 16 | __imm); \ 203 REG_SET_SEEN(b1); \ 204 }) 205 206 #define EMIT4_PCREL(op, pcrel) \ 207 ({ \ 208 long __pcrel = ((pcrel) >> 1) & 0xffff; \ 209 _EMIT4((op) | __pcrel); \ 210 }) 211 212 #define EMIT4_PCREL_RIC(op, mask, target) \ 213 ({ \ 214 int __rel = off_to_pcrel(jit, target) / 2; \ 215 _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \ 216 }) 217 218 #define _EMIT6(op1, op2) \ 219 ({ \ 220 if (jit->prg_buf) { \ 221 *(u32 *) (jit->prg_buf + jit->prg) = (op1); \ 222 *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \ 223 } \ 224 jit->prg += 6; \ 225 }) 226 227 #define _EMIT6_DISP(op1, op2, disp) \ 228 ({ \ 229 unsigned int __disp = (disp) & 0xfff; \ 230 _EMIT6((op1) | __disp, op2); \ 231 }) 232 233 #define _EMIT6_DISP_LH(op1, op2, disp) \ 234 ({ \ 235 u32 _disp = (u32) (disp); \ 236 unsigned int __disp_h = _disp & 0xff000; \ 237 unsigned int __disp_l = _disp & 0x00fff; \ 238 _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \ 239 }) 240 241 #define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \ 242 ({ \ 243 _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \ 244 reg_high(b3) << 8, op2, disp); \ 245 REG_SET_SEEN(b1); \ 246 REG_SET_SEEN(b2); \ 247 REG_SET_SEEN(b3); \ 248 }) 249 250 #define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \ 251 ({ \ 252 unsigned int rel = off_to_pcrel(jit, target) / 2; \ 253 _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \ 254 (op2) | (mask) << 12); \ 255 REG_SET_SEEN(b1); \ 256 REG_SET_SEEN(b2); \ 257 }) 258 259 #define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \ 260 ({ \ 261 unsigned int rel = off_to_pcrel(jit, target) / 2; \ 262 _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \ 263 (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \ 264 REG_SET_SEEN(b1); \ 265 BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \ 266 }) 267 268 #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \ 269 ({ \ 270 int rel = off_to_pcrel(jit, addrs[(i) + (off) + 1]) / 2;\ 271 _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\ 272 REG_SET_SEEN(b1); \ 273 REG_SET_SEEN(b2); \ 274 }) 275 276 static void emit6_pcrel_ril(struct bpf_jit *jit, u32 op, s64 pcrel) 277 { 278 u32 pc32dbl = (s32)(pcrel / 2); 279 280 _EMIT6(op | pc32dbl >> 16, pc32dbl & 0xffff); 281 } 282 283 static void emit6_pcrel_rilb(struct bpf_jit *jit, u32 op, u8 b, s64 pcrel) 284 { 285 emit6_pcrel_ril(jit, op | reg_high(b) << 16, pcrel); 286 REG_SET_SEEN(b); 287 } 288 289 #define EMIT6_PCREL_RILB(op, b, target) \ 290 emit6_pcrel_rilb(jit, op, b, off_to_pcrel(jit, target)) 291 292 #define EMIT6_PCREL_RILB_PTR(op, b, target_ptr) \ 293 emit6_pcrel_rilb(jit, op, b, ptr_to_pcrel(jit, target_ptr)) 294 295 static void emit6_pcrel_rilc(struct bpf_jit *jit, u32 op, u8 mask, s64 pcrel) 296 { 297 emit6_pcrel_ril(jit, op | mask << 20, pcrel); 298 } 299 300 #define EMIT6_PCREL_RILC(op, mask, target) \ 301 emit6_pcrel_rilc(jit, op, mask, off_to_pcrel(jit, target)) 302 303 #define EMIT6_PCREL_RILC_PTR(op, mask, target_ptr) \ 304 emit6_pcrel_rilc(jit, op, mask, ptr_to_pcrel(jit, target_ptr)) 305 306 #define _EMIT6_IMM(op, imm) \ 307 ({ \ 308 unsigned int __imm = (imm); \ 309 _EMIT6((op) | (__imm >> 16), __imm & 0xffff); \ 310 }) 311 312 #define EMIT6_IMM(op, b1, imm) \ 313 ({ \ 314 _EMIT6_IMM((op) | reg_high(b1) << 16, imm); \ 315 REG_SET_SEEN(b1); \ 316 }) 317 318 #define _EMIT_CONST_U32(val) \ 319 ({ \ 320 unsigned int ret; \ 321 ret = jit->lit32; \ 322 if (jit->prg_buf) \ 323 *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\ 324 jit->lit32 += 4; \ 325 ret; \ 326 }) 327 328 #define EMIT_CONST_U32(val) \ 329 ({ \ 330 jit->seen |= SEEN_LITERAL; \ 331 _EMIT_CONST_U32(val) - jit->base_ip; \ 332 }) 333 334 #define _EMIT_CONST_U64(val) \ 335 ({ \ 336 unsigned int ret; \ 337 ret = jit->lit64; \ 338 if (jit->prg_buf) \ 339 *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\ 340 jit->lit64 += 8; \ 341 ret; \ 342 }) 343 344 #define EMIT_CONST_U64(val) \ 345 ({ \ 346 jit->seen |= SEEN_LITERAL; \ 347 _EMIT_CONST_U64(val) - jit->base_ip; \ 348 }) 349 350 #define EMIT_ZERO(b1) \ 351 ({ \ 352 if (!fp->aux->verifier_zext) { \ 353 /* llgfr %dst,%dst (zero extend to 64 bit) */ \ 354 EMIT4(0xb9160000, b1, b1); \ 355 REG_SET_SEEN(b1); \ 356 } \ 357 }) 358 359 /* 360 * Return whether this is the first pass. The first pass is special, since we 361 * don't know any sizes yet, and thus must be conservative. 362 */ 363 static bool is_first_pass(struct bpf_jit *jit) 364 { 365 return jit->size == 0; 366 } 367 368 /* 369 * Return whether this is the code generation pass. The code generation pass is 370 * special, since we should change as little as possible. 371 */ 372 static bool is_codegen_pass(struct bpf_jit *jit) 373 { 374 return jit->prg_buf; 375 } 376 377 /* 378 * Return whether "rel" can be encoded as a short PC-relative offset 379 */ 380 static bool is_valid_rel(int rel) 381 { 382 return rel >= -65536 && rel <= 65534; 383 } 384 385 /* 386 * Return whether "off" can be reached using a short PC-relative offset 387 */ 388 static bool can_use_rel(struct bpf_jit *jit, int off) 389 { 390 return is_valid_rel(off - jit->prg); 391 } 392 393 /* 394 * Return whether given displacement can be encoded using 395 * Long-Displacement Facility 396 */ 397 static bool is_valid_ldisp(int disp) 398 { 399 return disp >= -524288 && disp <= 524287; 400 } 401 402 /* 403 * Return whether the next 32-bit literal pool entry can be referenced using 404 * Long-Displacement Facility 405 */ 406 static bool can_use_ldisp_for_lit32(struct bpf_jit *jit) 407 { 408 return is_valid_ldisp(jit->lit32 - jit->base_ip); 409 } 410 411 /* 412 * Return whether the next 64-bit literal pool entry can be referenced using 413 * Long-Displacement Facility 414 */ 415 static bool can_use_ldisp_for_lit64(struct bpf_jit *jit) 416 { 417 return is_valid_ldisp(jit->lit64 - jit->base_ip); 418 } 419 420 /* 421 * Fill whole space with illegal instructions 422 */ 423 static void jit_fill_hole(void *area, unsigned int size) 424 { 425 memset(area, 0, size); 426 } 427 428 /* 429 * Caller-allocated part of the frame. 430 * Thanks to packed stack, its otherwise unused initial part can be used for 431 * the BPF stack and for the next frame. 432 */ 433 struct prog_frame { 434 u64 unused[8]; 435 /* BPF stack starts here and grows towards 0 */ 436 u32 tail_call_cnt; 437 u32 pad; 438 u64 r6[10]; /* r6 - r15 */ 439 u64 backchain; 440 } __packed; 441 442 /* 443 * Save registers from "rs" (register start) to "re" (register end) on stack 444 */ 445 static void save_regs(struct bpf_jit *jit, u32 rs, u32 re) 446 { 447 u32 off = offsetof(struct prog_frame, r6) + (rs - 6) * 8; 448 449 if (rs == re) 450 /* stg %rs,off(%r15) */ 451 _EMIT6(0xe300f000 | rs << 20 | off, 0x0024); 452 else 453 /* stmg %rs,%re,off(%r15) */ 454 _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off); 455 } 456 457 /* 458 * Restore registers from "rs" (register start) to "re" (register end) on stack 459 */ 460 static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re) 461 { 462 u32 off = jit->frame_off + offsetof(struct prog_frame, r6) + (rs - 6) * 8; 463 464 if (rs == re) 465 /* lg %rs,off(%r15) */ 466 _EMIT6(0xe300f000 | rs << 20 | off, 0x0004); 467 else 468 /* lmg %rs,%re,off(%r15) */ 469 _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off); 470 } 471 472 /* 473 * Return first seen register (from start) 474 */ 475 static int get_start(u16 seen_regs, int start) 476 { 477 int i; 478 479 for (i = start; i <= 15; i++) { 480 if (seen_regs & (1 << i)) 481 return i; 482 } 483 return 0; 484 } 485 486 /* 487 * Return last seen register (from start) (gap >= 2) 488 */ 489 static int get_end(u16 seen_regs, int start) 490 { 491 int i; 492 493 for (i = start; i < 15; i++) { 494 if (!(seen_regs & (3 << i))) 495 return i - 1; 496 } 497 return (seen_regs & (1 << 15)) ? 15 : 14; 498 } 499 500 #define REGS_SAVE 1 501 #define REGS_RESTORE 0 502 /* 503 * Save and restore clobbered registers (6-15) on stack. 504 * We save/restore registers in chunks with gap >= 2 registers. 505 */ 506 static void save_restore_regs(struct bpf_jit *jit, int op, u16 extra_regs) 507 { 508 u16 seen_regs = jit->seen_regs | extra_regs; 509 const int last = 15, save_restore_size = 6; 510 int re = 6, rs; 511 512 if (is_first_pass(jit)) { 513 /* 514 * We don't know yet which registers are used. Reserve space 515 * conservatively. 516 */ 517 jit->prg += (last - re + 1) * save_restore_size; 518 return; 519 } 520 521 do { 522 rs = get_start(seen_regs, re); 523 if (!rs) 524 break; 525 re = get_end(seen_regs, rs + 1); 526 if (op == REGS_SAVE) 527 save_regs(jit, rs, re); 528 else 529 restore_regs(jit, rs, re); 530 re++; 531 } while (re <= last); 532 } 533 534 static void bpf_skip(struct bpf_jit *jit, int size) 535 { 536 if (size >= 6 && !is_valid_rel(size)) { 537 /* brcl 0xf,size */ 538 EMIT6_PCREL_RILC(0xc0040000, 0xf, size); 539 size -= 6; 540 } else if (size >= 4 && is_valid_rel(size)) { 541 /* brc 0xf,size */ 542 EMIT4_PCREL(0xa7f40000, size); 543 size -= 4; 544 } 545 while (size >= 2) { 546 /* bcr 0,%0 */ 547 _EMIT2(0x0700); 548 size -= 2; 549 } 550 } 551 552 /* 553 * PLT for hotpatchable calls. The calling convention is the same as for the 554 * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered. 555 */ 556 struct bpf_plt { 557 char code[16]; 558 void *ret; 559 void *target; 560 } __packed; 561 extern const struct bpf_plt bpf_plt; 562 asm( 563 ".pushsection .rodata\n" 564 " .balign 8\n" 565 "bpf_plt:\n" 566 " lgrl %r0,bpf_plt_ret\n" 567 " lgrl %r1,bpf_plt_target\n" 568 " br %r1\n" 569 " .balign 8\n" 570 "bpf_plt_ret: .quad 0\n" 571 "bpf_plt_target: .quad 0\n" 572 " .popsection\n" 573 ); 574 575 static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target) 576 { 577 memcpy(plt, &bpf_plt, sizeof(*plt)); 578 plt->ret = ret; 579 /* 580 * (target == NULL) implies that the branch to this PLT entry was 581 * patched and became a no-op. However, some CPU could have jumped 582 * to this PLT entry before patching and may be still executing it. 583 * 584 * Since the intention in this case is to make the PLT entry a no-op, 585 * make the target point to the return label instead of NULL. 586 */ 587 plt->target = target ?: ret; 588 } 589 590 /* 591 * Emit function prologue 592 * 593 * Save registers and create stack frame if necessary. 594 * Stack frame layout is described by struct prog_frame. 595 */ 596 static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp) 597 { 598 BUILD_BUG_ON(sizeof(struct prog_frame) != STACK_FRAME_OVERHEAD); 599 600 /* No-op for hotpatching */ 601 /* brcl 0,prologue_plt */ 602 EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt); 603 jit->prologue_plt_ret = jit->prg; 604 605 if (!bpf_is_subprog(fp)) { 606 /* Initialize the tail call counter in the main program. */ 607 /* xc tail_call_cnt(4,%r15),tail_call_cnt(%r15) */ 608 _EMIT6(0xd703f000 | offsetof(struct prog_frame, tail_call_cnt), 609 0xf000 | offsetof(struct prog_frame, tail_call_cnt)); 610 } else { 611 /* 612 * Skip the tail call counter initialization in subprograms. 613 * Insert nops in order to have tail_call_start at a 614 * predictable offset. 615 */ 616 bpf_skip(jit, 6); 617 } 618 /* Tail calls have to skip above initialization */ 619 jit->tail_call_start = jit->prg; 620 if (fp->aux->exception_cb) { 621 /* 622 * Switch stack, the new address is in the 2nd parameter. 623 * 624 * Arrange the restoration of %r6-%r15 in the epilogue. 625 * Do not restore them now, the prog does not need them. 626 */ 627 /* lgr %r15,%r3 */ 628 EMIT4(0xb9040000, REG_15, REG_3); 629 jit->seen_regs |= NVREGS; 630 } else { 631 /* Save registers */ 632 save_restore_regs(jit, REGS_SAVE, 633 fp->aux->exception_boundary ? NVREGS : 0); 634 } 635 /* Setup literal pool */ 636 if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) { 637 if (!is_first_pass(jit) && 638 is_valid_ldisp(jit->size - (jit->prg + 2))) { 639 /* basr %l,0 */ 640 EMIT2(0x0d00, REG_L, REG_0); 641 jit->base_ip = jit->prg; 642 } else { 643 /* larl %l,lit32_start */ 644 EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start); 645 jit->base_ip = jit->lit32_start; 646 } 647 } 648 /* Setup stack and backchain */ 649 if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) { 650 /* lgr %w1,%r15 (backchain) */ 651 EMIT4(0xb9040000, REG_W1, REG_15); 652 /* la %bfp,unused_end(%r15) (BPF frame pointer) */ 653 EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, 654 offsetofend(struct prog_frame, unused)); 655 /* aghi %r15,-frame_off */ 656 EMIT4_IMM(0xa70b0000, REG_15, -jit->frame_off); 657 /* stg %w1,backchain(%r15) */ 658 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, 659 REG_15, 660 offsetof(struct prog_frame, backchain)); 661 } 662 } 663 664 /* 665 * Jump using a register either directly or via an expoline thunk 666 */ 667 #define EMIT_JUMP_REG(reg) do { \ 668 if (nospec_uses_trampoline()) \ 669 /* brcl 0xf,__s390_indirect_jump_rN */ \ 670 EMIT6_PCREL_RILC_PTR(0xc0040000, 0x0f, \ 671 __s390_indirect_jump_r ## reg); \ 672 else \ 673 /* br %rN */ \ 674 _EMIT2(0x07f0 | reg); \ 675 } while (0) 676 677 /* 678 * Call r1 either directly or via __s390_indirect_jump_r1 thunk 679 */ 680 static void call_r1(struct bpf_jit *jit) 681 { 682 if (nospec_uses_trampoline()) 683 /* brasl %r14,__s390_indirect_jump_r1 */ 684 EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, 685 __s390_indirect_jump_r1); 686 else 687 /* basr %r14,%r1 */ 688 EMIT2(0x0d00, REG_14, REG_1); 689 } 690 691 /* 692 * Function epilogue 693 */ 694 static void bpf_jit_epilogue(struct bpf_jit *jit) 695 { 696 jit->exit_ip = jit->prg; 697 /* Load exit code: lgr %r2,%b0 */ 698 EMIT4(0xb9040000, REG_2, BPF_REG_0); 699 /* Restore registers */ 700 save_restore_regs(jit, REGS_RESTORE, 0); 701 EMIT_JUMP_REG(14); 702 703 jit->prg = ALIGN(jit->prg, 8); 704 jit->prologue_plt = jit->prg; 705 if (jit->prg_buf) 706 bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg), 707 jit->prg_buf + jit->prologue_plt_ret, NULL); 708 jit->prg += sizeof(struct bpf_plt); 709 } 710 711 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) 712 { 713 regs->psw.addr = extable_fixup(x); 714 if (x->data != -1) 715 regs->gprs[x->data] = 0; 716 return true; 717 } 718 719 /* 720 * A single BPF probe instruction 721 */ 722 struct bpf_jit_probe { 723 int prg; /* JITed instruction offset */ 724 int nop_prg; /* JITed nop offset */ 725 int reg; /* Register to clear on exception */ 726 int arena_reg; /* Register to use for arena addressing */ 727 }; 728 729 static void bpf_jit_probe_init(struct bpf_jit_probe *probe) 730 { 731 probe->prg = -1; 732 probe->nop_prg = -1; 733 probe->reg = -1; 734 probe->arena_reg = REG_0; 735 } 736 737 /* 738 * Handlers of certain exceptions leave psw.addr pointing to the instruction 739 * directly after the failing one. Therefore, create two exception table 740 * entries and also add a nop in case two probing instructions come directly 741 * after each other. 742 */ 743 static void bpf_jit_probe_emit_nop(struct bpf_jit *jit, 744 struct bpf_jit_probe *probe) 745 { 746 if (probe->prg == -1 || probe->nop_prg != -1) 747 /* The probe is not armed or nop is already emitted. */ 748 return; 749 750 probe->nop_prg = jit->prg; 751 /* bcr 0,%0 */ 752 _EMIT2(0x0700); 753 } 754 755 static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn, 756 struct bpf_jit_probe *probe) 757 { 758 if (BPF_MODE(insn->code) != BPF_PROBE_MEM && 759 BPF_MODE(insn->code) != BPF_PROBE_MEMSX && 760 BPF_MODE(insn->code) != BPF_PROBE_MEM32) 761 return; 762 763 if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { 764 /* lgrl %r1,kern_arena */ 765 EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); 766 probe->arena_reg = REG_W1; 767 } 768 probe->prg = jit->prg; 769 probe->reg = reg2hex[insn->dst_reg]; 770 } 771 772 static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn, 773 struct bpf_jit_probe *probe) 774 { 775 if (BPF_MODE(insn->code) != BPF_PROBE_MEM32) 776 return; 777 778 /* lgrl %r1,kern_arena */ 779 EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); 780 probe->arena_reg = REG_W1; 781 probe->prg = jit->prg; 782 } 783 784 static void bpf_jit_probe_atomic_pre(struct bpf_jit *jit, 785 struct bpf_insn *insn, 786 struct bpf_jit_probe *probe) 787 { 788 if (BPF_MODE(insn->code) != BPF_PROBE_ATOMIC) 789 return; 790 791 /* lgrl %r1,kern_arena */ 792 EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); 793 /* agr %r1,%dst */ 794 EMIT4(0xb9080000, REG_W1, insn->dst_reg); 795 probe->arena_reg = REG_W1; 796 probe->prg = jit->prg; 797 } 798 799 static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp, 800 struct bpf_jit_probe *probe) 801 { 802 struct exception_table_entry *ex; 803 int i, prg; 804 s64 delta; 805 u8 *insn; 806 807 if (probe->prg == -1) 808 /* The probe is not armed. */ 809 return 0; 810 bpf_jit_probe_emit_nop(jit, probe); 811 if (!fp->aux->extable) 812 /* Do nothing during early JIT passes. */ 813 return 0; 814 insn = jit->prg_buf + probe->prg; 815 if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg)) 816 /* JIT bug - gap between probe and nop instructions. */ 817 return -1; 818 for (i = 0; i < 2; i++) { 819 if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries)) 820 /* Verifier bug - not enough entries. */ 821 return -1; 822 ex = &fp->aux->extable[jit->excnt]; 823 /* Add extable entries for probe and nop instructions. */ 824 prg = i == 0 ? probe->prg : probe->nop_prg; 825 delta = jit->prg_buf + prg - (u8 *)&ex->insn; 826 if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX)) 827 /* JIT bug - code and extable must be close. */ 828 return -1; 829 ex->insn = delta; 830 /* 831 * Land on the current instruction. Note that the extable 832 * infrastructure ignores the fixup field; it is handled by 833 * ex_handler_bpf(). 834 */ 835 delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup; 836 if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX)) 837 /* JIT bug - landing pad and extable must be close. */ 838 return -1; 839 ex->fixup = delta; 840 ex->type = EX_TYPE_BPF; 841 ex->data = probe->reg; 842 jit->excnt++; 843 } 844 return 0; 845 } 846 847 /* 848 * Sign-extend the register if necessary 849 */ 850 static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags) 851 { 852 if (!(flags & BTF_FMODEL_SIGNED_ARG)) 853 return 0; 854 855 switch (size) { 856 case 1: 857 /* lgbr %r,%r */ 858 EMIT4(0xb9060000, r, r); 859 return 0; 860 case 2: 861 /* lghr %r,%r */ 862 EMIT4(0xb9070000, r, r); 863 return 0; 864 case 4: 865 /* lgfr %r,%r */ 866 EMIT4(0xb9140000, r, r); 867 return 0; 868 case 8: 869 return 0; 870 default: 871 return -1; 872 } 873 } 874 875 /* 876 * Compile one eBPF instruction into s390x code 877 * 878 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of 879 * stack space for the large switch statement. 880 */ 881 static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, 882 int i, bool extra_pass) 883 { 884 struct bpf_insn *insn = &fp->insnsi[i]; 885 s32 branch_oc_off = insn->off; 886 u32 dst_reg = insn->dst_reg; 887 u32 src_reg = insn->src_reg; 888 struct bpf_jit_probe probe; 889 int last, insn_count = 1; 890 u32 *addrs = jit->addrs; 891 s32 imm = insn->imm; 892 s16 off = insn->off; 893 unsigned int mask; 894 int err; 895 896 bpf_jit_probe_init(&probe); 897 898 switch (insn->code) { 899 /* 900 * BPF_MOV 901 */ 902 case BPF_ALU | BPF_MOV | BPF_X: 903 switch (insn->off) { 904 case 0: /* DST = (u32) SRC */ 905 /* llgfr %dst,%src */ 906 EMIT4(0xb9160000, dst_reg, src_reg); 907 if (insn_is_zext(&insn[1])) 908 insn_count = 2; 909 break; 910 case 8: /* DST = (u32)(s8) SRC */ 911 /* lbr %dst,%src */ 912 EMIT4(0xb9260000, dst_reg, src_reg); 913 /* llgfr %dst,%dst */ 914 EMIT4(0xb9160000, dst_reg, dst_reg); 915 break; 916 case 16: /* DST = (u32)(s16) SRC */ 917 /* lhr %dst,%src */ 918 EMIT4(0xb9270000, dst_reg, src_reg); 919 /* llgfr %dst,%dst */ 920 EMIT4(0xb9160000, dst_reg, dst_reg); 921 break; 922 } 923 break; 924 case BPF_ALU64 | BPF_MOV | BPF_X: 925 if (insn_is_cast_user(insn)) { 926 int patch_brc; 927 928 /* ltgr %dst,%src */ 929 EMIT4(0xb9020000, dst_reg, src_reg); 930 /* brc 8,0f */ 931 patch_brc = jit->prg; 932 EMIT4_PCREL_RIC(0xa7040000, 8, 0); 933 /* iihf %dst,user_arena>>32 */ 934 EMIT6_IMM(0xc0080000, dst_reg, jit->user_arena >> 32); 935 /* 0: */ 936 if (jit->prg_buf) 937 *(u16 *)(jit->prg_buf + patch_brc + 2) = 938 (jit->prg - patch_brc) >> 1; 939 break; 940 } 941 switch (insn->off) { 942 case 0: /* DST = SRC */ 943 /* lgr %dst,%src */ 944 EMIT4(0xb9040000, dst_reg, src_reg); 945 break; 946 case 8: /* DST = (s8) SRC */ 947 /* lgbr %dst,%src */ 948 EMIT4(0xb9060000, dst_reg, src_reg); 949 break; 950 case 16: /* DST = (s16) SRC */ 951 /* lghr %dst,%src */ 952 EMIT4(0xb9070000, dst_reg, src_reg); 953 break; 954 case 32: /* DST = (s32) SRC */ 955 /* lgfr %dst,%src */ 956 EMIT4(0xb9140000, dst_reg, src_reg); 957 break; 958 } 959 break; 960 case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */ 961 /* llilf %dst,imm */ 962 EMIT6_IMM(0xc00f0000, dst_reg, imm); 963 if (insn_is_zext(&insn[1])) 964 insn_count = 2; 965 break; 966 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */ 967 /* lgfi %dst,imm */ 968 EMIT6_IMM(0xc0010000, dst_reg, imm); 969 break; 970 /* 971 * BPF_LD 64 972 */ 973 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ 974 { 975 /* 16 byte instruction that uses two 'struct bpf_insn' */ 976 u64 imm64; 977 978 imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32; 979 /* lgrl %dst,imm */ 980 EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64)); 981 insn_count = 2; 982 break; 983 } 984 /* 985 * BPF_ADD 986 */ 987 case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */ 988 /* ar %dst,%src */ 989 EMIT2(0x1a00, dst_reg, src_reg); 990 EMIT_ZERO(dst_reg); 991 break; 992 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */ 993 /* agr %dst,%src */ 994 EMIT4(0xb9080000, dst_reg, src_reg); 995 break; 996 case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */ 997 if (imm != 0) { 998 /* alfi %dst,imm */ 999 EMIT6_IMM(0xc20b0000, dst_reg, imm); 1000 } 1001 EMIT_ZERO(dst_reg); 1002 break; 1003 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */ 1004 if (!imm) 1005 break; 1006 /* agfi %dst,imm */ 1007 EMIT6_IMM(0xc2080000, dst_reg, imm); 1008 break; 1009 /* 1010 * BPF_SUB 1011 */ 1012 case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */ 1013 /* sr %dst,%src */ 1014 EMIT2(0x1b00, dst_reg, src_reg); 1015 EMIT_ZERO(dst_reg); 1016 break; 1017 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */ 1018 /* sgr %dst,%src */ 1019 EMIT4(0xb9090000, dst_reg, src_reg); 1020 break; 1021 case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */ 1022 if (imm != 0) { 1023 /* alfi %dst,-imm */ 1024 EMIT6_IMM(0xc20b0000, dst_reg, -imm); 1025 } 1026 EMIT_ZERO(dst_reg); 1027 break; 1028 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */ 1029 if (!imm) 1030 break; 1031 if (imm == -0x80000000) { 1032 /* algfi %dst,0x80000000 */ 1033 EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000); 1034 } else { 1035 /* agfi %dst,-imm */ 1036 EMIT6_IMM(0xc2080000, dst_reg, -imm); 1037 } 1038 break; 1039 /* 1040 * BPF_MUL 1041 */ 1042 case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */ 1043 /* msr %dst,%src */ 1044 EMIT4(0xb2520000, dst_reg, src_reg); 1045 EMIT_ZERO(dst_reg); 1046 break; 1047 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */ 1048 /* msgr %dst,%src */ 1049 EMIT4(0xb90c0000, dst_reg, src_reg); 1050 break; 1051 case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */ 1052 if (imm != 1) { 1053 /* msfi %r5,imm */ 1054 EMIT6_IMM(0xc2010000, dst_reg, imm); 1055 } 1056 EMIT_ZERO(dst_reg); 1057 break; 1058 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */ 1059 if (imm == 1) 1060 break; 1061 /* msgfi %dst,imm */ 1062 EMIT6_IMM(0xc2000000, dst_reg, imm); 1063 break; 1064 /* 1065 * BPF_DIV / BPF_MOD 1066 */ 1067 case BPF_ALU | BPF_DIV | BPF_X: 1068 case BPF_ALU | BPF_MOD | BPF_X: 1069 { 1070 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 1071 1072 switch (off) { 1073 case 0: /* dst = (u32) dst {/,%} (u32) src */ 1074 /* xr %w0,%w0 */ 1075 EMIT2(0x1700, REG_W0, REG_W0); 1076 /* lr %w1,%dst */ 1077 EMIT2(0x1800, REG_W1, dst_reg); 1078 /* dlr %w0,%src */ 1079 EMIT4(0xb9970000, REG_W0, src_reg); 1080 break; 1081 case 1: /* dst = (u32) ((s32) dst {/,%} (s32) src) */ 1082 /* lgfr %r1,%dst */ 1083 EMIT4(0xb9140000, REG_W1, dst_reg); 1084 /* dsgfr %r0,%src */ 1085 EMIT4(0xb91d0000, REG_W0, src_reg); 1086 break; 1087 } 1088 /* llgfr %dst,%rc */ 1089 EMIT4(0xb9160000, dst_reg, rc_reg); 1090 if (insn_is_zext(&insn[1])) 1091 insn_count = 2; 1092 break; 1093 } 1094 case BPF_ALU64 | BPF_DIV | BPF_X: 1095 case BPF_ALU64 | BPF_MOD | BPF_X: 1096 { 1097 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 1098 1099 switch (off) { 1100 case 0: /* dst = dst {/,%} src */ 1101 /* lghi %w0,0 */ 1102 EMIT4_IMM(0xa7090000, REG_W0, 0); 1103 /* lgr %w1,%dst */ 1104 EMIT4(0xb9040000, REG_W1, dst_reg); 1105 /* dlgr %w0,%src */ 1106 EMIT4(0xb9870000, REG_W0, src_reg); 1107 break; 1108 case 1: /* dst = (s64) dst {/,%} (s64) src */ 1109 /* lgr %w1,%dst */ 1110 EMIT4(0xb9040000, REG_W1, dst_reg); 1111 /* dsgr %w0,%src */ 1112 EMIT4(0xb90d0000, REG_W0, src_reg); 1113 break; 1114 } 1115 /* lgr %dst,%rc */ 1116 EMIT4(0xb9040000, dst_reg, rc_reg); 1117 break; 1118 } 1119 case BPF_ALU | BPF_DIV | BPF_K: 1120 case BPF_ALU | BPF_MOD | BPF_K: 1121 { 1122 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 1123 1124 if (imm == 1) { 1125 if (BPF_OP(insn->code) == BPF_MOD) 1126 /* lghi %dst,0 */ 1127 EMIT4_IMM(0xa7090000, dst_reg, 0); 1128 else 1129 EMIT_ZERO(dst_reg); 1130 break; 1131 } 1132 if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) { 1133 switch (off) { 1134 case 0: /* dst = (u32) dst {/,%} (u32) imm */ 1135 /* xr %w0,%w0 */ 1136 EMIT2(0x1700, REG_W0, REG_W0); 1137 /* lr %w1,%dst */ 1138 EMIT2(0x1800, REG_W1, dst_reg); 1139 /* dl %w0,<d(imm)>(%l) */ 1140 EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, 1141 REG_L, EMIT_CONST_U32(imm)); 1142 break; 1143 case 1: /* dst = (s32) dst {/,%} (s32) imm */ 1144 /* lgfr %r1,%dst */ 1145 EMIT4(0xb9140000, REG_W1, dst_reg); 1146 /* dsgf %r0,<d(imm)>(%l) */ 1147 EMIT6_DISP_LH(0xe3000000, 0x001d, REG_W0, REG_0, 1148 REG_L, EMIT_CONST_U32(imm)); 1149 break; 1150 } 1151 } else { 1152 switch (off) { 1153 case 0: /* dst = (u32) dst {/,%} (u32) imm */ 1154 /* xr %w0,%w0 */ 1155 EMIT2(0x1700, REG_W0, REG_W0); 1156 /* lr %w1,%dst */ 1157 EMIT2(0x1800, REG_W1, dst_reg); 1158 /* lrl %dst,imm */ 1159 EMIT6_PCREL_RILB(0xc40d0000, dst_reg, 1160 _EMIT_CONST_U32(imm)); 1161 jit->seen |= SEEN_LITERAL; 1162 /* dlr %w0,%dst */ 1163 EMIT4(0xb9970000, REG_W0, dst_reg); 1164 break; 1165 case 1: /* dst = (s32) dst {/,%} (s32) imm */ 1166 /* lgfr %w1,%dst */ 1167 EMIT4(0xb9140000, REG_W1, dst_reg); 1168 /* lgfrl %dst,imm */ 1169 EMIT6_PCREL_RILB(0xc40c0000, dst_reg, 1170 _EMIT_CONST_U32(imm)); 1171 jit->seen |= SEEN_LITERAL; 1172 /* dsgr %w0,%dst */ 1173 EMIT4(0xb90d0000, REG_W0, dst_reg); 1174 break; 1175 } 1176 } 1177 /* llgfr %dst,%rc */ 1178 EMIT4(0xb9160000, dst_reg, rc_reg); 1179 if (insn_is_zext(&insn[1])) 1180 insn_count = 2; 1181 break; 1182 } 1183 case BPF_ALU64 | BPF_DIV | BPF_K: 1184 case BPF_ALU64 | BPF_MOD | BPF_K: 1185 { 1186 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 1187 1188 if (imm == 1) { 1189 if (BPF_OP(insn->code) == BPF_MOD) 1190 /* lhgi %dst,0 */ 1191 EMIT4_IMM(0xa7090000, dst_reg, 0); 1192 break; 1193 } 1194 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { 1195 switch (off) { 1196 case 0: /* dst = dst {/,%} imm */ 1197 /* lghi %w0,0 */ 1198 EMIT4_IMM(0xa7090000, REG_W0, 0); 1199 /* lgr %w1,%dst */ 1200 EMIT4(0xb9040000, REG_W1, dst_reg); 1201 /* dlg %w0,<d(imm)>(%l) */ 1202 EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, 1203 REG_L, EMIT_CONST_U64(imm)); 1204 break; 1205 case 1: /* dst = (s64) dst {/,%} (s64) imm */ 1206 /* lgr %w1,%dst */ 1207 EMIT4(0xb9040000, REG_W1, dst_reg); 1208 /* dsg %w0,<d(imm)>(%l) */ 1209 EMIT6_DISP_LH(0xe3000000, 0x000d, REG_W0, REG_0, 1210 REG_L, EMIT_CONST_U64(imm)); 1211 break; 1212 } 1213 } else { 1214 switch (off) { 1215 case 0: /* dst = dst {/,%} imm */ 1216 /* lghi %w0,0 */ 1217 EMIT4_IMM(0xa7090000, REG_W0, 0); 1218 /* lgr %w1,%dst */ 1219 EMIT4(0xb9040000, REG_W1, dst_reg); 1220 /* lgrl %dst,imm */ 1221 EMIT6_PCREL_RILB(0xc4080000, dst_reg, 1222 _EMIT_CONST_U64(imm)); 1223 jit->seen |= SEEN_LITERAL; 1224 /* dlgr %w0,%dst */ 1225 EMIT4(0xb9870000, REG_W0, dst_reg); 1226 break; 1227 case 1: /* dst = (s64) dst {/,%} (s64) imm */ 1228 /* lgr %w1,%dst */ 1229 EMIT4(0xb9040000, REG_W1, dst_reg); 1230 /* lgrl %dst,imm */ 1231 EMIT6_PCREL_RILB(0xc4080000, dst_reg, 1232 _EMIT_CONST_U64(imm)); 1233 jit->seen |= SEEN_LITERAL; 1234 /* dsgr %w0,%dst */ 1235 EMIT4(0xb90d0000, REG_W0, dst_reg); 1236 break; 1237 } 1238 } 1239 /* lgr %dst,%rc */ 1240 EMIT4(0xb9040000, dst_reg, rc_reg); 1241 break; 1242 } 1243 /* 1244 * BPF_AND 1245 */ 1246 case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */ 1247 /* nr %dst,%src */ 1248 EMIT2(0x1400, dst_reg, src_reg); 1249 EMIT_ZERO(dst_reg); 1250 break; 1251 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */ 1252 /* ngr %dst,%src */ 1253 EMIT4(0xb9800000, dst_reg, src_reg); 1254 break; 1255 case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */ 1256 /* nilf %dst,imm */ 1257 EMIT6_IMM(0xc00b0000, dst_reg, imm); 1258 EMIT_ZERO(dst_reg); 1259 break; 1260 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */ 1261 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { 1262 /* ng %dst,<d(imm)>(%l) */ 1263 EMIT6_DISP_LH(0xe3000000, 0x0080, 1264 dst_reg, REG_0, REG_L, 1265 EMIT_CONST_U64(imm)); 1266 } else { 1267 /* lgrl %w0,imm */ 1268 EMIT6_PCREL_RILB(0xc4080000, REG_W0, 1269 _EMIT_CONST_U64(imm)); 1270 jit->seen |= SEEN_LITERAL; 1271 /* ngr %dst,%w0 */ 1272 EMIT4(0xb9800000, dst_reg, REG_W0); 1273 } 1274 break; 1275 /* 1276 * BPF_OR 1277 */ 1278 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */ 1279 /* or %dst,%src */ 1280 EMIT2(0x1600, dst_reg, src_reg); 1281 EMIT_ZERO(dst_reg); 1282 break; 1283 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */ 1284 /* ogr %dst,%src */ 1285 EMIT4(0xb9810000, dst_reg, src_reg); 1286 break; 1287 case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */ 1288 /* oilf %dst,imm */ 1289 EMIT6_IMM(0xc00d0000, dst_reg, imm); 1290 EMIT_ZERO(dst_reg); 1291 break; 1292 case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */ 1293 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { 1294 /* og %dst,<d(imm)>(%l) */ 1295 EMIT6_DISP_LH(0xe3000000, 0x0081, 1296 dst_reg, REG_0, REG_L, 1297 EMIT_CONST_U64(imm)); 1298 } else { 1299 /* lgrl %w0,imm */ 1300 EMIT6_PCREL_RILB(0xc4080000, REG_W0, 1301 _EMIT_CONST_U64(imm)); 1302 jit->seen |= SEEN_LITERAL; 1303 /* ogr %dst,%w0 */ 1304 EMIT4(0xb9810000, dst_reg, REG_W0); 1305 } 1306 break; 1307 /* 1308 * BPF_XOR 1309 */ 1310 case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */ 1311 /* xr %dst,%src */ 1312 EMIT2(0x1700, dst_reg, src_reg); 1313 EMIT_ZERO(dst_reg); 1314 break; 1315 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */ 1316 /* xgr %dst,%src */ 1317 EMIT4(0xb9820000, dst_reg, src_reg); 1318 break; 1319 case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */ 1320 if (imm != 0) { 1321 /* xilf %dst,imm */ 1322 EMIT6_IMM(0xc0070000, dst_reg, imm); 1323 } 1324 EMIT_ZERO(dst_reg); 1325 break; 1326 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */ 1327 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { 1328 /* xg %dst,<d(imm)>(%l) */ 1329 EMIT6_DISP_LH(0xe3000000, 0x0082, 1330 dst_reg, REG_0, REG_L, 1331 EMIT_CONST_U64(imm)); 1332 } else { 1333 /* lgrl %w0,imm */ 1334 EMIT6_PCREL_RILB(0xc4080000, REG_W0, 1335 _EMIT_CONST_U64(imm)); 1336 jit->seen |= SEEN_LITERAL; 1337 /* xgr %dst,%w0 */ 1338 EMIT4(0xb9820000, dst_reg, REG_W0); 1339 } 1340 break; 1341 /* 1342 * BPF_LSH 1343 */ 1344 case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */ 1345 /* sll %dst,0(%src) */ 1346 EMIT4_DISP(0x89000000, dst_reg, src_reg, 0); 1347 EMIT_ZERO(dst_reg); 1348 break; 1349 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */ 1350 /* sllg %dst,%dst,0(%src) */ 1351 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0); 1352 break; 1353 case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */ 1354 if (imm != 0) { 1355 /* sll %dst,imm(%r0) */ 1356 EMIT4_DISP(0x89000000, dst_reg, REG_0, imm); 1357 } 1358 EMIT_ZERO(dst_reg); 1359 break; 1360 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */ 1361 if (imm == 0) 1362 break; 1363 /* sllg %dst,%dst,imm(%r0) */ 1364 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm); 1365 break; 1366 /* 1367 * BPF_RSH 1368 */ 1369 case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */ 1370 /* srl %dst,0(%src) */ 1371 EMIT4_DISP(0x88000000, dst_reg, src_reg, 0); 1372 EMIT_ZERO(dst_reg); 1373 break; 1374 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */ 1375 /* srlg %dst,%dst,0(%src) */ 1376 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0); 1377 break; 1378 case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */ 1379 if (imm != 0) { 1380 /* srl %dst,imm(%r0) */ 1381 EMIT4_DISP(0x88000000, dst_reg, REG_0, imm); 1382 } 1383 EMIT_ZERO(dst_reg); 1384 break; 1385 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */ 1386 if (imm == 0) 1387 break; 1388 /* srlg %dst,%dst,imm(%r0) */ 1389 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm); 1390 break; 1391 /* 1392 * BPF_ARSH 1393 */ 1394 case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */ 1395 /* sra %dst,%dst,0(%src) */ 1396 EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0); 1397 EMIT_ZERO(dst_reg); 1398 break; 1399 case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */ 1400 /* srag %dst,%dst,0(%src) */ 1401 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0); 1402 break; 1403 case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */ 1404 if (imm != 0) { 1405 /* sra %dst,imm(%r0) */ 1406 EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm); 1407 } 1408 EMIT_ZERO(dst_reg); 1409 break; 1410 case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */ 1411 if (imm == 0) 1412 break; 1413 /* srag %dst,%dst,imm(%r0) */ 1414 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm); 1415 break; 1416 /* 1417 * BPF_NEG 1418 */ 1419 case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */ 1420 /* lcr %dst,%dst */ 1421 EMIT2(0x1300, dst_reg, dst_reg); 1422 EMIT_ZERO(dst_reg); 1423 break; 1424 case BPF_ALU64 | BPF_NEG: /* dst = -dst */ 1425 /* lcgr %dst,%dst */ 1426 EMIT4(0xb9030000, dst_reg, dst_reg); 1427 break; 1428 /* 1429 * BPF_FROM_BE/LE 1430 */ 1431 case BPF_ALU | BPF_END | BPF_FROM_BE: 1432 /* s390 is big endian, therefore only clear high order bytes */ 1433 switch (imm) { 1434 case 16: /* dst = (u16) cpu_to_be16(dst) */ 1435 /* llghr %dst,%dst */ 1436 EMIT4(0xb9850000, dst_reg, dst_reg); 1437 if (insn_is_zext(&insn[1])) 1438 insn_count = 2; 1439 break; 1440 case 32: /* dst = (u32) cpu_to_be32(dst) */ 1441 if (!fp->aux->verifier_zext) 1442 /* llgfr %dst,%dst */ 1443 EMIT4(0xb9160000, dst_reg, dst_reg); 1444 break; 1445 case 64: /* dst = (u64) cpu_to_be64(dst) */ 1446 break; 1447 } 1448 break; 1449 case BPF_ALU | BPF_END | BPF_FROM_LE: 1450 case BPF_ALU64 | BPF_END | BPF_FROM_LE: 1451 switch (imm) { 1452 case 16: /* dst = (u16) cpu_to_le16(dst) */ 1453 /* lrvr %dst,%dst */ 1454 EMIT4(0xb91f0000, dst_reg, dst_reg); 1455 /* srl %dst,16(%r0) */ 1456 EMIT4_DISP(0x88000000, dst_reg, REG_0, 16); 1457 /* llghr %dst,%dst */ 1458 EMIT4(0xb9850000, dst_reg, dst_reg); 1459 if (insn_is_zext(&insn[1])) 1460 insn_count = 2; 1461 break; 1462 case 32: /* dst = (u32) cpu_to_le32(dst) */ 1463 /* lrvr %dst,%dst */ 1464 EMIT4(0xb91f0000, dst_reg, dst_reg); 1465 if (!fp->aux->verifier_zext) 1466 /* llgfr %dst,%dst */ 1467 EMIT4(0xb9160000, dst_reg, dst_reg); 1468 break; 1469 case 64: /* dst = (u64) cpu_to_le64(dst) */ 1470 /* lrvgr %dst,%dst */ 1471 EMIT4(0xb90f0000, dst_reg, dst_reg); 1472 break; 1473 } 1474 break; 1475 /* 1476 * BPF_NOSPEC (speculation barrier) 1477 */ 1478 case BPF_ST | BPF_NOSPEC: 1479 break; 1480 /* 1481 * BPF_ST(X) 1482 */ 1483 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */ 1484 case BPF_STX | BPF_PROBE_MEM32 | BPF_B: 1485 bpf_jit_probe_store_pre(jit, insn, &probe); 1486 /* stcy %src,off(%dst,%arena) */ 1487 EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, 1488 probe.arena_reg, off); 1489 err = bpf_jit_probe_post(jit, fp, &probe); 1490 if (err < 0) 1491 return err; 1492 jit->seen |= SEEN_MEM; 1493 break; 1494 case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */ 1495 case BPF_STX | BPF_PROBE_MEM32 | BPF_H: 1496 bpf_jit_probe_store_pre(jit, insn, &probe); 1497 /* sthy %src,off(%dst,%arena) */ 1498 EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, 1499 probe.arena_reg, off); 1500 err = bpf_jit_probe_post(jit, fp, &probe); 1501 if (err < 0) 1502 return err; 1503 jit->seen |= SEEN_MEM; 1504 break; 1505 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */ 1506 case BPF_STX | BPF_PROBE_MEM32 | BPF_W: 1507 bpf_jit_probe_store_pre(jit, insn, &probe); 1508 /* sty %src,off(%dst,%arena) */ 1509 EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, 1510 probe.arena_reg, off); 1511 err = bpf_jit_probe_post(jit, fp, &probe); 1512 if (err < 0) 1513 return err; 1514 jit->seen |= SEEN_MEM; 1515 break; 1516 case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */ 1517 case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: 1518 bpf_jit_probe_store_pre(jit, insn, &probe); 1519 /* stg %src,off(%dst,%arena) */ 1520 EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, 1521 probe.arena_reg, off); 1522 err = bpf_jit_probe_post(jit, fp, &probe); 1523 if (err < 0) 1524 return err; 1525 jit->seen |= SEEN_MEM; 1526 break; 1527 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */ 1528 case BPF_ST | BPF_PROBE_MEM32 | BPF_B: 1529 /* lhi %w0,imm */ 1530 EMIT4_IMM(0xa7080000, REG_W0, (u8) imm); 1531 bpf_jit_probe_store_pre(jit, insn, &probe); 1532 /* stcy %w0,off(%dst,%arena) */ 1533 EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, 1534 probe.arena_reg, off); 1535 err = bpf_jit_probe_post(jit, fp, &probe); 1536 if (err < 0) 1537 return err; 1538 jit->seen |= SEEN_MEM; 1539 break; 1540 case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */ 1541 case BPF_ST | BPF_PROBE_MEM32 | BPF_H: 1542 /* lhi %w0,imm */ 1543 EMIT4_IMM(0xa7080000, REG_W0, (u16) imm); 1544 bpf_jit_probe_store_pre(jit, insn, &probe); 1545 /* sthy %w0,off(%dst,%arena) */ 1546 EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, 1547 probe.arena_reg, off); 1548 err = bpf_jit_probe_post(jit, fp, &probe); 1549 if (err < 0) 1550 return err; 1551 jit->seen |= SEEN_MEM; 1552 break; 1553 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */ 1554 case BPF_ST | BPF_PROBE_MEM32 | BPF_W: 1555 /* llilf %w0,imm */ 1556 EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm); 1557 bpf_jit_probe_store_pre(jit, insn, &probe); 1558 /* sty %w0,off(%dst,%arena) */ 1559 EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, 1560 probe.arena_reg, off); 1561 err = bpf_jit_probe_post(jit, fp, &probe); 1562 if (err < 0) 1563 return err; 1564 jit->seen |= SEEN_MEM; 1565 break; 1566 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */ 1567 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: 1568 /* lgfi %w0,imm */ 1569 EMIT6_IMM(0xc0010000, REG_W0, imm); 1570 bpf_jit_probe_store_pre(jit, insn, &probe); 1571 /* stg %w0,off(%dst,%arena) */ 1572 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, 1573 probe.arena_reg, off); 1574 err = bpf_jit_probe_post(jit, fp, &probe); 1575 if (err < 0) 1576 return err; 1577 jit->seen |= SEEN_MEM; 1578 break; 1579 /* 1580 * BPF_ATOMIC 1581 */ 1582 case BPF_STX | BPF_ATOMIC | BPF_DW: 1583 case BPF_STX | BPF_ATOMIC | BPF_W: 1584 case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: 1585 case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: 1586 { 1587 bool is32 = BPF_SIZE(insn->code) == BPF_W; 1588 1589 /* 1590 * Unlike loads and stores, atomics have only a base register, 1591 * but no index register. For the non-arena case, simply use 1592 * %dst as a base. For the arena case, use the work register 1593 * %r1: first, load the arena base into it, and then add %dst 1594 * to it. 1595 */ 1596 probe.arena_reg = dst_reg; 1597 1598 switch (insn->imm) { 1599 #define EMIT_ATOMIC(op32, op64) do { \ 1600 bpf_jit_probe_atomic_pre(jit, insn, &probe); \ 1601 /* {op32|op64} {%w0|%src},%src,off(%arena) */ \ 1602 EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \ 1603 (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \ 1604 src_reg, probe.arena_reg, off); \ 1605 err = bpf_jit_probe_post(jit, fp, &probe); \ 1606 if (err < 0) \ 1607 return err; \ 1608 if (insn->imm & BPF_FETCH) { \ 1609 /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \ 1610 _EMIT2(0x07e0); \ 1611 if (is32) \ 1612 EMIT_ZERO(src_reg); \ 1613 } \ 1614 } while (0) 1615 case BPF_ADD: 1616 case BPF_ADD | BPF_FETCH: 1617 /* {laal|laalg} */ 1618 EMIT_ATOMIC(0x00fa, 0x00ea); 1619 break; 1620 case BPF_AND: 1621 case BPF_AND | BPF_FETCH: 1622 /* {lan|lang} */ 1623 EMIT_ATOMIC(0x00f4, 0x00e4); 1624 break; 1625 case BPF_OR: 1626 case BPF_OR | BPF_FETCH: 1627 /* {lao|laog} */ 1628 EMIT_ATOMIC(0x00f6, 0x00e6); 1629 break; 1630 case BPF_XOR: 1631 case BPF_XOR | BPF_FETCH: 1632 /* {lax|laxg} */ 1633 EMIT_ATOMIC(0x00f7, 0x00e7); 1634 break; 1635 #undef EMIT_ATOMIC 1636 case BPF_XCHG: { 1637 struct bpf_jit_probe load_probe = probe; 1638 int loop_start; 1639 1640 bpf_jit_probe_atomic_pre(jit, insn, &load_probe); 1641 /* {ly|lg} %w0,off(%arena) */ 1642 EMIT6_DISP_LH(0xe3000000, 1643 is32 ? 0x0058 : 0x0004, REG_W0, REG_0, 1644 load_probe.arena_reg, off); 1645 bpf_jit_probe_emit_nop(jit, &load_probe); 1646 /* Reuse {ly|lg}'s arena_reg for {csy|csg}. */ 1647 if (load_probe.prg != -1) { 1648 probe.prg = jit->prg; 1649 probe.arena_reg = load_probe.arena_reg; 1650 } 1651 loop_start = jit->prg; 1652 /* 0: {csy|csg} %w0,%src,off(%arena) */ 1653 EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030, 1654 REG_W0, src_reg, probe.arena_reg, off); 1655 bpf_jit_probe_emit_nop(jit, &probe); 1656 /* brc 4,0b */ 1657 EMIT4_PCREL_RIC(0xa7040000, 4, loop_start); 1658 /* {llgfr|lgr} %src,%w0 */ 1659 EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0); 1660 /* Both probes should land here on exception. */ 1661 err = bpf_jit_probe_post(jit, fp, &load_probe); 1662 if (err < 0) 1663 return err; 1664 err = bpf_jit_probe_post(jit, fp, &probe); 1665 if (err < 0) 1666 return err; 1667 if (is32 && insn_is_zext(&insn[1])) 1668 insn_count = 2; 1669 break; 1670 } 1671 case BPF_CMPXCHG: 1672 bpf_jit_probe_atomic_pre(jit, insn, &probe); 1673 /* 0: {csy|csg} %b0,%src,off(%arena) */ 1674 EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030, 1675 BPF_REG_0, src_reg, 1676 probe.arena_reg, off); 1677 err = bpf_jit_probe_post(jit, fp, &probe); 1678 if (err < 0) 1679 return err; 1680 break; 1681 default: 1682 pr_err("Unknown atomic operation %02x\n", insn->imm); 1683 return -1; 1684 } 1685 1686 jit->seen |= SEEN_MEM; 1687 break; 1688 } 1689 /* 1690 * BPF_LDX 1691 */ 1692 case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */ 1693 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1694 case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: 1695 bpf_jit_probe_load_pre(jit, insn, &probe); 1696 /* llgc %dst,off(%src,%arena) */ 1697 EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, 1698 probe.arena_reg, off); 1699 err = bpf_jit_probe_post(jit, fp, &probe); 1700 if (err < 0) 1701 return err; 1702 jit->seen |= SEEN_MEM; 1703 if (insn_is_zext(&insn[1])) 1704 insn_count = 2; 1705 break; 1706 case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */ 1707 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: 1708 bpf_jit_probe_load_pre(jit, insn, &probe); 1709 /* lgb %dst,off(%src) */ 1710 EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off); 1711 err = bpf_jit_probe_post(jit, fp, &probe); 1712 if (err < 0) 1713 return err; 1714 jit->seen |= SEEN_MEM; 1715 break; 1716 case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ 1717 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1718 case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: 1719 bpf_jit_probe_load_pre(jit, insn, &probe); 1720 /* llgh %dst,off(%src,%arena) */ 1721 EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, 1722 probe.arena_reg, off); 1723 err = bpf_jit_probe_post(jit, fp, &probe); 1724 if (err < 0) 1725 return err; 1726 jit->seen |= SEEN_MEM; 1727 if (insn_is_zext(&insn[1])) 1728 insn_count = 2; 1729 break; 1730 case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */ 1731 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: 1732 bpf_jit_probe_load_pre(jit, insn, &probe); 1733 /* lgh %dst,off(%src) */ 1734 EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off); 1735 err = bpf_jit_probe_post(jit, fp, &probe); 1736 if (err < 0) 1737 return err; 1738 jit->seen |= SEEN_MEM; 1739 break; 1740 case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ 1741 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1742 case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: 1743 bpf_jit_probe_load_pre(jit, insn, &probe); 1744 /* llgf %dst,off(%src) */ 1745 jit->seen |= SEEN_MEM; 1746 EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, 1747 probe.arena_reg, off); 1748 err = bpf_jit_probe_post(jit, fp, &probe); 1749 if (err < 0) 1750 return err; 1751 if (insn_is_zext(&insn[1])) 1752 insn_count = 2; 1753 break; 1754 case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */ 1755 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: 1756 bpf_jit_probe_load_pre(jit, insn, &probe); 1757 /* lgf %dst,off(%src) */ 1758 jit->seen |= SEEN_MEM; 1759 EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off); 1760 err = bpf_jit_probe_post(jit, fp, &probe); 1761 if (err < 0) 1762 return err; 1763 break; 1764 case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ 1765 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1766 case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: 1767 bpf_jit_probe_load_pre(jit, insn, &probe); 1768 /* lg %dst,off(%src,%arena) */ 1769 jit->seen |= SEEN_MEM; 1770 EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, 1771 probe.arena_reg, off); 1772 err = bpf_jit_probe_post(jit, fp, &probe); 1773 if (err < 0) 1774 return err; 1775 break; 1776 /* 1777 * BPF_JMP / CALL 1778 */ 1779 case BPF_JMP | BPF_CALL: 1780 { 1781 const struct btf_func_model *m; 1782 bool func_addr_fixed; 1783 int j, ret; 1784 u64 func; 1785 1786 ret = bpf_jit_get_func_addr(fp, insn, extra_pass, 1787 &func, &func_addr_fixed); 1788 if (ret < 0) 1789 return -1; 1790 1791 REG_SET_SEEN(BPF_REG_5); 1792 jit->seen |= SEEN_FUNC; 1793 /* 1794 * Copy the tail call counter to where the callee expects it. 1795 * 1796 * Note 1: The callee can increment the tail call counter, but 1797 * we do not load it back, since the x86 JIT does not do this 1798 * either. 1799 * 1800 * Note 2: We assume that the verifier does not let us call the 1801 * main program, which clears the tail call counter on entry. 1802 */ 1803 /* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */ 1804 _EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt), 1805 0xf000 | (jit->frame_off + 1806 offsetof(struct prog_frame, tail_call_cnt))); 1807 1808 /* Sign-extend the kfunc arguments. */ 1809 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 1810 m = bpf_jit_find_kfunc_model(fp, insn); 1811 if (!m) 1812 return -1; 1813 1814 for (j = 0; j < m->nr_args; j++) { 1815 if (sign_extend(jit, BPF_REG_1 + j, 1816 m->arg_size[j], 1817 m->arg_flags[j])) 1818 return -1; 1819 } 1820 } 1821 1822 /* lgrl %w1,func */ 1823 EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func)); 1824 /* %r1() */ 1825 call_r1(jit); 1826 /* lgr %b0,%r2: load return value into %b0 */ 1827 EMIT4(0xb9040000, BPF_REG_0, REG_2); 1828 break; 1829 } 1830 case BPF_JMP | BPF_TAIL_CALL: { 1831 int patch_1_clrj, patch_2_clij, patch_3_brc; 1832 1833 /* 1834 * Implicit input: 1835 * B1: pointer to ctx 1836 * B2: pointer to bpf_array 1837 * B3: index in bpf_array 1838 * 1839 * if (index >= array->map.max_entries) 1840 * goto out; 1841 */ 1842 1843 /* llgf %w1,map.max_entries(%b2) */ 1844 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, 1845 offsetof(struct bpf_array, map.max_entries)); 1846 /* if ((u32)%b3 >= (u32)%w1) goto out; */ 1847 /* clrj %b3,%w1,0xa,out */ 1848 patch_1_clrj = jit->prg; 1849 EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa, 1850 jit->prg); 1851 1852 /* 1853 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 1854 * goto out; 1855 */ 1856 1857 off = jit->frame_off + 1858 offsetof(struct prog_frame, tail_call_cnt); 1859 /* lhi %w0,1 */ 1860 EMIT4_IMM(0xa7080000, REG_W0, 1); 1861 /* laal %w1,%w0,off(%r15) */ 1862 EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off); 1863 /* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */ 1864 patch_2_clij = jit->prg; 1865 EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1, 1866 2, jit->prg); 1867 1868 /* 1869 * prog = array->ptrs[index]; 1870 * if (prog == NULL) 1871 * goto out; 1872 */ 1873 1874 /* llgfr %r1,%b3: %r1 = (u32) index */ 1875 EMIT4(0xb9160000, REG_1, BPF_REG_3); 1876 /* sllg %r1,%r1,3: %r1 *= 8 */ 1877 EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3); 1878 /* ltg %r1,prog(%b2,%r1) */ 1879 EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2, 1880 REG_1, offsetof(struct bpf_array, ptrs)); 1881 /* brc 0x8,out */ 1882 patch_3_brc = jit->prg; 1883 EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg); 1884 1885 /* 1886 * Restore registers before calling function 1887 */ 1888 save_restore_regs(jit, REGS_RESTORE, 0); 1889 1890 /* 1891 * goto *(prog->bpf_func + tail_call_start); 1892 */ 1893 1894 /* lg %r1,bpf_func(%r1) */ 1895 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0, 1896 offsetof(struct bpf_prog, bpf_func)); 1897 if (nospec_uses_trampoline()) { 1898 jit->seen |= SEEN_FUNC; 1899 /* aghi %r1,tail_call_start */ 1900 EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start); 1901 /* brcl 0xf,__s390_indirect_jump_r1 */ 1902 EMIT6_PCREL_RILC_PTR(0xc0040000, 0xf, 1903 __s390_indirect_jump_r1); 1904 } else { 1905 /* bc 0xf,tail_call_start(%r1) */ 1906 _EMIT4(0x47f01000 + jit->tail_call_start); 1907 } 1908 /* out: */ 1909 if (jit->prg_buf) { 1910 *(u16 *)(jit->prg_buf + patch_1_clrj + 2) = 1911 (jit->prg - patch_1_clrj) >> 1; 1912 *(u16 *)(jit->prg_buf + patch_2_clij + 2) = 1913 (jit->prg - patch_2_clij) >> 1; 1914 *(u16 *)(jit->prg_buf + patch_3_brc + 2) = 1915 (jit->prg - patch_3_brc) >> 1; 1916 } 1917 break; 1918 } 1919 case BPF_JMP | BPF_EXIT: /* return b0 */ 1920 last = (i == fp->len - 1) ? 1 : 0; 1921 if (last) 1922 break; 1923 if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip)) 1924 /* brc 0xf, <exit> */ 1925 EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip); 1926 else 1927 /* brcl 0xf, <exit> */ 1928 EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip); 1929 break; 1930 /* 1931 * Branch relative (number of skipped instructions) to offset on 1932 * condition. 1933 * 1934 * Condition code to mask mapping: 1935 * 1936 * CC | Description | Mask 1937 * ------------------------------ 1938 * 0 | Operands equal | 8 1939 * 1 | First operand low | 4 1940 * 2 | First operand high | 2 1941 * 3 | Unused | 1 1942 * 1943 * For s390x relative branches: ip = ip + off_bytes 1944 * For BPF relative branches: insn = insn + off_insns + 1 1945 * 1946 * For example for s390x with offset 0 we jump to the branch 1947 * instruction itself (loop) and for BPF with offset 0 we 1948 * branch to the instruction behind the branch. 1949 */ 1950 case BPF_JMP32 | BPF_JA: /* if (true) */ 1951 branch_oc_off = imm; 1952 fallthrough; 1953 case BPF_JMP | BPF_JA: /* if (true) */ 1954 mask = 0xf000; /* j */ 1955 goto branch_oc; 1956 case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */ 1957 case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */ 1958 mask = 0x2000; /* jh */ 1959 goto branch_ks; 1960 case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */ 1961 case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */ 1962 mask = 0x4000; /* jl */ 1963 goto branch_ks; 1964 case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */ 1965 case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */ 1966 mask = 0xa000; /* jhe */ 1967 goto branch_ks; 1968 case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */ 1969 case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */ 1970 mask = 0xc000; /* jle */ 1971 goto branch_ks; 1972 case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */ 1973 case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */ 1974 mask = 0x2000; /* jh */ 1975 goto branch_ku; 1976 case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */ 1977 case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */ 1978 mask = 0x4000; /* jl */ 1979 goto branch_ku; 1980 case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */ 1981 case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */ 1982 mask = 0xa000; /* jhe */ 1983 goto branch_ku; 1984 case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */ 1985 case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */ 1986 mask = 0xc000; /* jle */ 1987 goto branch_ku; 1988 case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */ 1989 case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */ 1990 mask = 0x7000; /* jne */ 1991 goto branch_ku; 1992 case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */ 1993 case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */ 1994 mask = 0x8000; /* je */ 1995 goto branch_ku; 1996 case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */ 1997 case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */ 1998 mask = 0x7000; /* jnz */ 1999 if (BPF_CLASS(insn->code) == BPF_JMP32) { 2000 /* llilf %w1,imm (load zero extend imm) */ 2001 EMIT6_IMM(0xc00f0000, REG_W1, imm); 2002 /* nr %w1,%dst */ 2003 EMIT2(0x1400, REG_W1, dst_reg); 2004 } else { 2005 /* lgfi %w1,imm (load sign extend imm) */ 2006 EMIT6_IMM(0xc0010000, REG_W1, imm); 2007 /* ngr %w1,%dst */ 2008 EMIT4(0xb9800000, REG_W1, dst_reg); 2009 } 2010 goto branch_oc; 2011 2012 case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */ 2013 case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */ 2014 mask = 0x2000; /* jh */ 2015 goto branch_xs; 2016 case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */ 2017 case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */ 2018 mask = 0x4000; /* jl */ 2019 goto branch_xs; 2020 case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */ 2021 case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */ 2022 mask = 0xa000; /* jhe */ 2023 goto branch_xs; 2024 case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */ 2025 case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */ 2026 mask = 0xc000; /* jle */ 2027 goto branch_xs; 2028 case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */ 2029 case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */ 2030 mask = 0x2000; /* jh */ 2031 goto branch_xu; 2032 case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */ 2033 case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */ 2034 mask = 0x4000; /* jl */ 2035 goto branch_xu; 2036 case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */ 2037 case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */ 2038 mask = 0xa000; /* jhe */ 2039 goto branch_xu; 2040 case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */ 2041 case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */ 2042 mask = 0xc000; /* jle */ 2043 goto branch_xu; 2044 case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */ 2045 case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */ 2046 mask = 0x7000; /* jne */ 2047 goto branch_xu; 2048 case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */ 2049 case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */ 2050 mask = 0x8000; /* je */ 2051 goto branch_xu; 2052 case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */ 2053 case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */ 2054 { 2055 bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 2056 2057 mask = 0x7000; /* jnz */ 2058 /* nrk or ngrk %w1,%dst,%src */ 2059 EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000), 2060 REG_W1, dst_reg, src_reg); 2061 goto branch_oc; 2062 branch_ks: 2063 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 2064 /* cfi or cgfi %dst,imm */ 2065 EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000, 2066 dst_reg, imm); 2067 if (!is_first_pass(jit) && 2068 can_use_rel(jit, addrs[i + off + 1])) { 2069 /* brc mask,off */ 2070 EMIT4_PCREL_RIC(0xa7040000, 2071 mask >> 12, addrs[i + off + 1]); 2072 } else { 2073 /* brcl mask,off */ 2074 EMIT6_PCREL_RILC(0xc0040000, 2075 mask >> 12, addrs[i + off + 1]); 2076 } 2077 break; 2078 branch_ku: 2079 /* lgfi %w1,imm (load sign extend imm) */ 2080 src_reg = REG_1; 2081 EMIT6_IMM(0xc0010000, src_reg, imm); 2082 goto branch_xu; 2083 branch_xs: 2084 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 2085 if (!is_first_pass(jit) && 2086 can_use_rel(jit, addrs[i + off + 1])) { 2087 /* crj or cgrj %dst,%src,mask,off */ 2088 EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), 2089 dst_reg, src_reg, i, off, mask); 2090 } else { 2091 /* cr or cgr %dst,%src */ 2092 if (is_jmp32) 2093 EMIT2(0x1900, dst_reg, src_reg); 2094 else 2095 EMIT4(0xb9200000, dst_reg, src_reg); 2096 /* brcl mask,off */ 2097 EMIT6_PCREL_RILC(0xc0040000, 2098 mask >> 12, addrs[i + off + 1]); 2099 } 2100 break; 2101 branch_xu: 2102 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 2103 if (!is_first_pass(jit) && 2104 can_use_rel(jit, addrs[i + off + 1])) { 2105 /* clrj or clgrj %dst,%src,mask,off */ 2106 EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), 2107 dst_reg, src_reg, i, off, mask); 2108 } else { 2109 /* clr or clgr %dst,%src */ 2110 if (is_jmp32) 2111 EMIT2(0x1500, dst_reg, src_reg); 2112 else 2113 EMIT4(0xb9210000, dst_reg, src_reg); 2114 /* brcl mask,off */ 2115 EMIT6_PCREL_RILC(0xc0040000, 2116 mask >> 12, addrs[i + off + 1]); 2117 } 2118 break; 2119 branch_oc: 2120 if (!is_first_pass(jit) && 2121 can_use_rel(jit, addrs[i + branch_oc_off + 1])) { 2122 /* brc mask,off */ 2123 EMIT4_PCREL_RIC(0xa7040000, 2124 mask >> 12, 2125 addrs[i + branch_oc_off + 1]); 2126 } else { 2127 /* brcl mask,off */ 2128 EMIT6_PCREL_RILC(0xc0040000, 2129 mask >> 12, 2130 addrs[i + branch_oc_off + 1]); 2131 } 2132 break; 2133 } 2134 default: /* too complex, give up */ 2135 pr_err("Unknown opcode %02x\n", insn->code); 2136 return -1; 2137 } 2138 2139 return insn_count; 2140 } 2141 2142 /* 2143 * Return whether new i-th instruction address does not violate any invariant 2144 */ 2145 static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i) 2146 { 2147 /* On the first pass anything goes */ 2148 if (is_first_pass(jit)) 2149 return true; 2150 2151 /* The codegen pass must not change anything */ 2152 if (is_codegen_pass(jit)) 2153 return jit->addrs[i] == jit->prg; 2154 2155 /* Passes in between must not increase code size */ 2156 return jit->addrs[i] >= jit->prg; 2157 } 2158 2159 /* 2160 * Update the address of i-th instruction 2161 */ 2162 static int bpf_set_addr(struct bpf_jit *jit, int i) 2163 { 2164 int delta; 2165 2166 if (is_codegen_pass(jit)) { 2167 delta = jit->prg - jit->addrs[i]; 2168 if (delta < 0) 2169 bpf_skip(jit, -delta); 2170 } 2171 if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i))) 2172 return -1; 2173 jit->addrs[i] = jit->prg; 2174 return 0; 2175 } 2176 2177 /* 2178 * Compile eBPF program into s390x code 2179 */ 2180 static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, 2181 bool extra_pass) 2182 { 2183 int i, insn_count, lit32_size, lit64_size; 2184 u64 kern_arena; 2185 2186 jit->lit32 = jit->lit32_start; 2187 jit->lit64 = jit->lit64_start; 2188 jit->prg = 0; 2189 jit->excnt = 0; 2190 if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) 2191 jit->frame_off = sizeof(struct prog_frame) - 2192 offsetofend(struct prog_frame, unused) + 2193 round_up(fp->aux->stack_depth, 8); 2194 else 2195 jit->frame_off = 0; 2196 2197 kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena); 2198 if (kern_arena) 2199 jit->kern_arena = _EMIT_CONST_U64(kern_arena); 2200 jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena); 2201 2202 bpf_jit_prologue(jit, fp); 2203 if (bpf_set_addr(jit, 0) < 0) 2204 return -1; 2205 for (i = 0; i < fp->len; i += insn_count) { 2206 insn_count = bpf_jit_insn(jit, fp, i, extra_pass); 2207 if (insn_count < 0) 2208 return -1; 2209 /* Next instruction address */ 2210 if (bpf_set_addr(jit, i + insn_count) < 0) 2211 return -1; 2212 } 2213 bpf_jit_epilogue(jit); 2214 2215 lit32_size = jit->lit32 - jit->lit32_start; 2216 lit64_size = jit->lit64 - jit->lit64_start; 2217 jit->lit32_start = jit->prg; 2218 if (lit32_size) 2219 jit->lit32_start = ALIGN(jit->lit32_start, 4); 2220 jit->lit64_start = jit->lit32_start + lit32_size; 2221 if (lit64_size) 2222 jit->lit64_start = ALIGN(jit->lit64_start, 8); 2223 jit->size = jit->lit64_start + lit64_size; 2224 jit->size_prg = jit->prg; 2225 2226 if (WARN_ON_ONCE(fp->aux->extable && 2227 jit->excnt != fp->aux->num_exentries)) 2228 /* Verifier bug - too many entries. */ 2229 return -1; 2230 2231 return 0; 2232 } 2233 2234 bool bpf_jit_needs_zext(void) 2235 { 2236 return true; 2237 } 2238 2239 struct s390_jit_data { 2240 struct bpf_binary_header *header; 2241 struct bpf_jit ctx; 2242 int pass; 2243 }; 2244 2245 static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit, 2246 struct bpf_prog *fp) 2247 { 2248 struct bpf_binary_header *header; 2249 struct bpf_insn *insn; 2250 u32 extable_size; 2251 u32 code_size; 2252 int i; 2253 2254 for (i = 0; i < fp->len; i++) { 2255 insn = &fp->insnsi[i]; 2256 2257 if (BPF_CLASS(insn->code) == BPF_STX && 2258 BPF_MODE(insn->code) == BPF_PROBE_ATOMIC && 2259 (BPF_SIZE(insn->code) == BPF_DW || 2260 BPF_SIZE(insn->code) == BPF_W) && 2261 insn->imm == BPF_XCHG) 2262 /* 2263 * bpf_jit_insn() emits a load and a compare-and-swap, 2264 * both of which need to be probed. 2265 */ 2266 fp->aux->num_exentries += 1; 2267 } 2268 /* We need two entries per insn. */ 2269 fp->aux->num_exentries *= 2; 2270 2271 code_size = roundup(jit->size, 2272 __alignof__(struct exception_table_entry)); 2273 extable_size = fp->aux->num_exentries * 2274 sizeof(struct exception_table_entry); 2275 header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf, 2276 8, jit_fill_hole); 2277 if (!header) 2278 return NULL; 2279 fp->aux->extable = (struct exception_table_entry *) 2280 (jit->prg_buf + code_size); 2281 return header; 2282 } 2283 2284 /* 2285 * Compile eBPF program "fp" 2286 */ 2287 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) 2288 { 2289 struct bpf_prog *tmp, *orig_fp = fp; 2290 struct bpf_binary_header *header; 2291 struct s390_jit_data *jit_data; 2292 bool tmp_blinded = false; 2293 bool extra_pass = false; 2294 struct bpf_jit jit; 2295 int pass; 2296 2297 if (!fp->jit_requested) 2298 return orig_fp; 2299 2300 tmp = bpf_jit_blind_constants(fp); 2301 /* 2302 * If blinding was requested and we failed during blinding, 2303 * we must fall back to the interpreter. 2304 */ 2305 if (IS_ERR(tmp)) 2306 return orig_fp; 2307 if (tmp != fp) { 2308 tmp_blinded = true; 2309 fp = tmp; 2310 } 2311 2312 jit_data = fp->aux->jit_data; 2313 if (!jit_data) { 2314 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 2315 if (!jit_data) { 2316 fp = orig_fp; 2317 goto out; 2318 } 2319 fp->aux->jit_data = jit_data; 2320 } 2321 if (jit_data->ctx.addrs) { 2322 jit = jit_data->ctx; 2323 header = jit_data->header; 2324 extra_pass = true; 2325 pass = jit_data->pass + 1; 2326 goto skip_init_ctx; 2327 } 2328 2329 memset(&jit, 0, sizeof(jit)); 2330 jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); 2331 if (jit.addrs == NULL) { 2332 fp = orig_fp; 2333 goto free_addrs; 2334 } 2335 /* 2336 * Three initial passes: 2337 * - 1/2: Determine clobbered registers 2338 * - 3: Calculate program size and addrs array 2339 */ 2340 for (pass = 1; pass <= 3; pass++) { 2341 if (bpf_jit_prog(&jit, fp, extra_pass)) { 2342 fp = orig_fp; 2343 goto free_addrs; 2344 } 2345 } 2346 /* 2347 * Final pass: Allocate and generate program 2348 */ 2349 header = bpf_jit_alloc(&jit, fp); 2350 if (!header) { 2351 fp = orig_fp; 2352 goto free_addrs; 2353 } 2354 skip_init_ctx: 2355 if (bpf_jit_prog(&jit, fp, extra_pass)) { 2356 bpf_jit_binary_free(header); 2357 fp = orig_fp; 2358 goto free_addrs; 2359 } 2360 if (bpf_jit_enable > 1) { 2361 bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf); 2362 print_fn_code(jit.prg_buf, jit.size_prg); 2363 } 2364 if (!fp->is_func || extra_pass) { 2365 if (bpf_jit_binary_lock_ro(header)) { 2366 bpf_jit_binary_free(header); 2367 fp = orig_fp; 2368 goto free_addrs; 2369 } 2370 } else { 2371 jit_data->header = header; 2372 jit_data->ctx = jit; 2373 jit_data->pass = pass; 2374 } 2375 fp->bpf_func = (void *) jit.prg_buf; 2376 fp->jited = 1; 2377 fp->jited_len = jit.size; 2378 2379 if (!fp->is_func || extra_pass) { 2380 bpf_prog_fill_jited_linfo(fp, jit.addrs + 1); 2381 free_addrs: 2382 kvfree(jit.addrs); 2383 kfree(jit_data); 2384 fp->aux->jit_data = NULL; 2385 } 2386 out: 2387 if (tmp_blinded) 2388 bpf_jit_prog_release_other(fp, fp == orig_fp ? 2389 tmp : orig_fp); 2390 return fp; 2391 } 2392 2393 bool bpf_jit_supports_kfunc_call(void) 2394 { 2395 return true; 2396 } 2397 2398 bool bpf_jit_supports_far_kfunc_call(void) 2399 { 2400 return true; 2401 } 2402 2403 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 2404 void *old_addr, void *new_addr) 2405 { 2406 struct bpf_plt expected_plt, current_plt, new_plt, *plt; 2407 struct { 2408 u16 opc; 2409 s32 disp; 2410 } __packed insn; 2411 char *ret; 2412 int err; 2413 2414 /* Verify the branch to be patched. */ 2415 err = copy_from_kernel_nofault(&insn, ip, sizeof(insn)); 2416 if (err < 0) 2417 return err; 2418 if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0))) 2419 return -EINVAL; 2420 2421 if (t == BPF_MOD_JUMP && 2422 insn.disp == ((char *)new_addr - (char *)ip) >> 1) { 2423 /* 2424 * The branch already points to the destination, 2425 * there is no PLT. 2426 */ 2427 } else { 2428 /* Verify the PLT. */ 2429 plt = ip + (insn.disp << 1); 2430 err = copy_from_kernel_nofault(¤t_plt, plt, 2431 sizeof(current_plt)); 2432 if (err < 0) 2433 return err; 2434 ret = (char *)ip + 6; 2435 bpf_jit_plt(&expected_plt, ret, old_addr); 2436 if (memcmp(¤t_plt, &expected_plt, sizeof(current_plt))) 2437 return -EINVAL; 2438 /* Adjust the call address. */ 2439 bpf_jit_plt(&new_plt, ret, new_addr); 2440 s390_kernel_write(&plt->target, &new_plt.target, 2441 sizeof(void *)); 2442 } 2443 2444 /* Adjust the mask of the branch. */ 2445 insn.opc = 0xc004 | (new_addr ? 0xf0 : 0); 2446 s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1); 2447 2448 /* Make the new code visible to the other CPUs. */ 2449 text_poke_sync_lock(); 2450 2451 return 0; 2452 } 2453 2454 struct bpf_tramp_jit { 2455 struct bpf_jit common; 2456 int orig_stack_args_off;/* Offset of arguments placed on stack by the 2457 * func_addr's original caller 2458 */ 2459 int stack_size; /* Trampoline stack size */ 2460 int backchain_off; /* Offset of backchain */ 2461 int stack_args_off; /* Offset of stack arguments for calling 2462 * func_addr, has to be at the top 2463 */ 2464 int reg_args_off; /* Offset of register arguments for calling 2465 * func_addr 2466 */ 2467 int ip_off; /* For bpf_get_func_ip(), has to be at 2468 * (ctx - 16) 2469 */ 2470 int arg_cnt_off; /* For bpf_get_func_arg_cnt(), has to be at 2471 * (ctx - 8) 2472 */ 2473 int bpf_args_off; /* Offset of BPF_PROG context, which consists 2474 * of BPF arguments followed by return value 2475 */ 2476 int retval_off; /* Offset of return value (see above) */ 2477 int r7_r8_off; /* Offset of saved %r7 and %r8, which are used 2478 * for __bpf_prog_enter() return value and 2479 * func_addr respectively 2480 */ 2481 int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */ 2482 int tccnt_off; /* Offset of saved tailcall counter */ 2483 int r14_off; /* Offset of saved %r14, has to be at the 2484 * bottom */ 2485 int do_fexit; /* do_fexit: label */ 2486 }; 2487 2488 static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val) 2489 { 2490 /* llihf %dst_reg,val_hi */ 2491 EMIT6_IMM(0xc00e0000, dst_reg, (val >> 32)); 2492 /* oilf %rdst_reg,val_lo */ 2493 EMIT6_IMM(0xc00d0000, dst_reg, val); 2494 } 2495 2496 static int invoke_bpf_prog(struct bpf_tramp_jit *tjit, 2497 const struct btf_func_model *m, 2498 struct bpf_tramp_link *tlink, bool save_ret) 2499 { 2500 struct bpf_jit *jit = &tjit->common; 2501 int cookie_off = tjit->run_ctx_off + 2502 offsetof(struct bpf_tramp_run_ctx, bpf_cookie); 2503 struct bpf_prog *p = tlink->link.prog; 2504 int patch; 2505 2506 /* 2507 * run_ctx.cookie = tlink->cookie; 2508 */ 2509 2510 /* %r0 = tlink->cookie */ 2511 load_imm64(jit, REG_W0, tlink->cookie); 2512 /* stg %r0,cookie_off(%r15) */ 2513 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off); 2514 2515 /* 2516 * if ((start = __bpf_prog_enter(p, &run_ctx)) == 0) 2517 * goto skip; 2518 */ 2519 2520 /* %r1 = __bpf_prog_enter */ 2521 load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p)); 2522 /* %r2 = p */ 2523 load_imm64(jit, REG_2, (u64)p); 2524 /* la %r3,run_ctx_off(%r15) */ 2525 EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off); 2526 /* %r1() */ 2527 call_r1(jit); 2528 /* ltgr %r7,%r2 */ 2529 EMIT4(0xb9020000, REG_7, REG_2); 2530 /* brcl 8,skip */ 2531 patch = jit->prg; 2532 EMIT6_PCREL_RILC(0xc0040000, 8, 0); 2533 2534 /* 2535 * retval = bpf_func(args, p->insnsi); 2536 */ 2537 2538 /* %r1 = p->bpf_func */ 2539 load_imm64(jit, REG_1, (u64)p->bpf_func); 2540 /* la %r2,bpf_args_off(%r15) */ 2541 EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off); 2542 /* %r3 = p->insnsi */ 2543 if (!p->jited) 2544 load_imm64(jit, REG_3, (u64)p->insnsi); 2545 /* %r1() */ 2546 call_r1(jit); 2547 /* stg %r2,retval_off(%r15) */ 2548 if (save_ret) { 2549 if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags)) 2550 return -1; 2551 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15, 2552 tjit->retval_off); 2553 } 2554 2555 /* skip: */ 2556 if (jit->prg_buf) 2557 *(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1; 2558 2559 /* 2560 * __bpf_prog_exit(p, start, &run_ctx); 2561 */ 2562 2563 /* %r1 = __bpf_prog_exit */ 2564 load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p)); 2565 /* %r2 = p */ 2566 load_imm64(jit, REG_2, (u64)p); 2567 /* lgr %r3,%r7 */ 2568 EMIT4(0xb9040000, REG_3, REG_7); 2569 /* la %r4,run_ctx_off(%r15) */ 2570 EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off); 2571 /* %r1() */ 2572 call_r1(jit); 2573 2574 return 0; 2575 } 2576 2577 static int alloc_stack(struct bpf_tramp_jit *tjit, size_t size) 2578 { 2579 int stack_offset = tjit->stack_size; 2580 2581 tjit->stack_size += size; 2582 return stack_offset; 2583 } 2584 2585 /* ABI uses %r2 - %r6 for parameter passing. */ 2586 #define MAX_NR_REG_ARGS 5 2587 2588 /* The "L" field of the "mvc" instruction is 8 bits. */ 2589 #define MAX_MVC_SIZE 256 2590 #define MAX_NR_STACK_ARGS (MAX_MVC_SIZE / sizeof(u64)) 2591 2592 /* -mfentry generates a 6-byte nop on s390x. */ 2593 #define S390X_PATCH_SIZE 6 2594 2595 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, 2596 struct bpf_tramp_jit *tjit, 2597 const struct btf_func_model *m, 2598 u32 flags, 2599 struct bpf_tramp_links *tlinks, 2600 void *func_addr) 2601 { 2602 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 2603 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; 2604 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 2605 int nr_bpf_args, nr_reg_args, nr_stack_args; 2606 struct bpf_jit *jit = &tjit->common; 2607 int arg, bpf_arg_off; 2608 int i, j; 2609 2610 /* Support as many stack arguments as "mvc" instruction can handle. */ 2611 nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS); 2612 nr_stack_args = m->nr_args - nr_reg_args; 2613 if (nr_stack_args > MAX_NR_STACK_ARGS) 2614 return -ENOTSUPP; 2615 2616 /* Return to %r14 in the struct_ops case. */ 2617 if (flags & BPF_TRAMP_F_INDIRECT) 2618 flags |= BPF_TRAMP_F_SKIP_FRAME; 2619 2620 /* 2621 * Compute how many arguments we need to pass to BPF programs. 2622 * BPF ABI mirrors that of x86_64: arguments that are 16 bytes or 2623 * smaller are packed into 1 or 2 registers; larger arguments are 2624 * passed via pointers. 2625 * In s390x ABI, arguments that are 8 bytes or smaller are packed into 2626 * a register; larger arguments are passed via pointers. 2627 * We need to deal with this difference. 2628 */ 2629 nr_bpf_args = 0; 2630 for (i = 0; i < m->nr_args; i++) { 2631 if (m->arg_size[i] <= 8) 2632 nr_bpf_args += 1; 2633 else if (m->arg_size[i] <= 16) 2634 nr_bpf_args += 2; 2635 else 2636 return -ENOTSUPP; 2637 } 2638 2639 /* 2640 * Calculate the stack layout. 2641 */ 2642 2643 /* 2644 * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x 2645 * ABI requires, put our backchain at the end of the allocated memory. 2646 */ 2647 tjit->stack_size = STACK_FRAME_OVERHEAD; 2648 tjit->backchain_off = tjit->stack_size - sizeof(u64); 2649 tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64)); 2650 tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64)); 2651 tjit->ip_off = alloc_stack(tjit, sizeof(u64)); 2652 tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64)); 2653 tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64)); 2654 tjit->retval_off = alloc_stack(tjit, sizeof(u64)); 2655 tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64)); 2656 tjit->run_ctx_off = alloc_stack(tjit, 2657 sizeof(struct bpf_tramp_run_ctx)); 2658 tjit->tccnt_off = alloc_stack(tjit, sizeof(u64)); 2659 tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2); 2660 /* 2661 * In accordance with the s390x ABI, the caller has allocated 2662 * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's 2663 * backchain, and the rest we can use. 2664 */ 2665 tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64); 2666 tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD; 2667 2668 /* lgr %r1,%r15 */ 2669 EMIT4(0xb9040000, REG_1, REG_15); 2670 /* aghi %r15,-stack_size */ 2671 EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size); 2672 /* stg %r1,backchain_off(%r15) */ 2673 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15, 2674 tjit->backchain_off); 2675 /* mvc tccnt_off(4,%r15),stack_size+tail_call_cnt(%r15) */ 2676 _EMIT6(0xd203f000 | tjit->tccnt_off, 2677 0xf000 | (tjit->stack_size + 2678 offsetof(struct prog_frame, tail_call_cnt))); 2679 /* stmg %r2,%rN,fwd_reg_args_off(%r15) */ 2680 if (nr_reg_args) 2681 EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2, 2682 REG_2 + (nr_reg_args - 1), REG_15, 2683 tjit->reg_args_off); 2684 for (i = 0, j = 0; i < m->nr_args; i++) { 2685 if (i < MAX_NR_REG_ARGS) 2686 arg = REG_2 + i; 2687 else 2688 arg = tjit->orig_stack_args_off + 2689 (i - MAX_NR_REG_ARGS) * sizeof(u64); 2690 bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64); 2691 if (m->arg_size[i] <= 8) { 2692 if (i < MAX_NR_REG_ARGS) 2693 /* stg %arg,bpf_arg_off(%r15) */ 2694 EMIT6_DISP_LH(0xe3000000, 0x0024, arg, 2695 REG_0, REG_15, bpf_arg_off); 2696 else 2697 /* mvc bpf_arg_off(8,%r15),arg(%r15) */ 2698 _EMIT6(0xd207f000 | bpf_arg_off, 2699 0xf000 | arg); 2700 j += 1; 2701 } else { 2702 if (i < MAX_NR_REG_ARGS) { 2703 /* mvc bpf_arg_off(16,%r15),0(%arg) */ 2704 _EMIT6(0xd20ff000 | bpf_arg_off, 2705 reg2hex[arg] << 12); 2706 } else { 2707 /* lg %r1,arg(%r15) */ 2708 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_0, 2709 REG_15, arg); 2710 /* mvc bpf_arg_off(16,%r15),0(%r1) */ 2711 _EMIT6(0xd20ff000 | bpf_arg_off, 0x1000); 2712 } 2713 j += 2; 2714 } 2715 } 2716 /* stmg %r7,%r8,r7_r8_off(%r15) */ 2717 EMIT6_DISP_LH(0xeb000000, 0x0024, REG_7, REG_8, REG_15, 2718 tjit->r7_r8_off); 2719 /* stg %r14,r14_off(%r15) */ 2720 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off); 2721 2722 if (flags & BPF_TRAMP_F_ORIG_STACK) { 2723 /* 2724 * The ftrace trampoline puts the return address (which is the 2725 * address of the original function + S390X_PATCH_SIZE) into 2726 * %r0; see ftrace_shared_hotpatch_trampoline_br and 2727 * ftrace_init_nop() for details. 2728 */ 2729 2730 /* lgr %r8,%r0 */ 2731 EMIT4(0xb9040000, REG_8, REG_0); 2732 } else { 2733 /* %r8 = func_addr + S390X_PATCH_SIZE */ 2734 load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE); 2735 } 2736 2737 /* 2738 * ip = func_addr; 2739 * arg_cnt = m->nr_args; 2740 */ 2741 2742 if (flags & BPF_TRAMP_F_IP_ARG) { 2743 /* %r0 = func_addr */ 2744 load_imm64(jit, REG_0, (u64)func_addr); 2745 /* stg %r0,ip_off(%r15) */ 2746 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15, 2747 tjit->ip_off); 2748 } 2749 /* lghi %r0,nr_bpf_args */ 2750 EMIT4_IMM(0xa7090000, REG_0, nr_bpf_args); 2751 /* stg %r0,arg_cnt_off(%r15) */ 2752 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15, 2753 tjit->arg_cnt_off); 2754 2755 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2756 /* 2757 * __bpf_tramp_enter(im); 2758 */ 2759 2760 /* %r1 = __bpf_tramp_enter */ 2761 load_imm64(jit, REG_1, (u64)__bpf_tramp_enter); 2762 /* %r2 = im */ 2763 load_imm64(jit, REG_2, (u64)im); 2764 /* %r1() */ 2765 call_r1(jit); 2766 } 2767 2768 for (i = 0; i < fentry->nr_links; i++) 2769 if (invoke_bpf_prog(tjit, m, fentry->links[i], 2770 flags & BPF_TRAMP_F_RET_FENTRY_RET)) 2771 return -EINVAL; 2772 2773 if (fmod_ret->nr_links) { 2774 /* 2775 * retval = 0; 2776 */ 2777 2778 /* xc retval_off(8,%r15),retval_off(%r15) */ 2779 _EMIT6(0xd707f000 | tjit->retval_off, 2780 0xf000 | tjit->retval_off); 2781 2782 for (i = 0; i < fmod_ret->nr_links; i++) { 2783 if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true)) 2784 return -EINVAL; 2785 2786 /* 2787 * if (retval) 2788 * goto do_fexit; 2789 */ 2790 2791 /* ltg %r0,retval_off(%r15) */ 2792 EMIT6_DISP_LH(0xe3000000, 0x0002, REG_0, REG_0, REG_15, 2793 tjit->retval_off); 2794 /* brcl 7,do_fexit */ 2795 EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit); 2796 } 2797 } 2798 2799 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2800 /* 2801 * retval = func_addr(args); 2802 */ 2803 2804 /* lmg %r2,%rN,reg_args_off(%r15) */ 2805 if (nr_reg_args) 2806 EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2, 2807 REG_2 + (nr_reg_args - 1), REG_15, 2808 tjit->reg_args_off); 2809 /* mvc stack_args_off(N,%r15),orig_stack_args_off(%r15) */ 2810 if (nr_stack_args) 2811 _EMIT6(0xd200f000 | 2812 (nr_stack_args * sizeof(u64) - 1) << 16 | 2813 tjit->stack_args_off, 2814 0xf000 | tjit->orig_stack_args_off); 2815 /* mvc tail_call_cnt(4,%r15),tccnt_off(%r15) */ 2816 _EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt), 2817 0xf000 | tjit->tccnt_off); 2818 /* lgr %r1,%r8 */ 2819 EMIT4(0xb9040000, REG_1, REG_8); 2820 /* %r1() */ 2821 call_r1(jit); 2822 /* stg %r2,retval_off(%r15) */ 2823 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15, 2824 tjit->retval_off); 2825 2826 im->ip_after_call = jit->prg_buf + jit->prg; 2827 2828 /* 2829 * The following nop will be patched by bpf_tramp_image_put(). 2830 */ 2831 2832 /* brcl 0,im->ip_epilogue */ 2833 EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue); 2834 } 2835 2836 /* do_fexit: */ 2837 tjit->do_fexit = jit->prg; 2838 for (i = 0; i < fexit->nr_links; i++) 2839 if (invoke_bpf_prog(tjit, m, fexit->links[i], false)) 2840 return -EINVAL; 2841 2842 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2843 im->ip_epilogue = jit->prg_buf + jit->prg; 2844 2845 /* 2846 * __bpf_tramp_exit(im); 2847 */ 2848 2849 /* %r1 = __bpf_tramp_exit */ 2850 load_imm64(jit, REG_1, (u64)__bpf_tramp_exit); 2851 /* %r2 = im */ 2852 load_imm64(jit, REG_2, (u64)im); 2853 /* %r1() */ 2854 call_r1(jit); 2855 } 2856 2857 /* lmg %r2,%rN,reg_args_off(%r15) */ 2858 if ((flags & BPF_TRAMP_F_RESTORE_REGS) && nr_reg_args) 2859 EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2, 2860 REG_2 + (nr_reg_args - 1), REG_15, 2861 tjit->reg_args_off); 2862 /* lgr %r1,%r8 */ 2863 if (!(flags & BPF_TRAMP_F_SKIP_FRAME)) 2864 EMIT4(0xb9040000, REG_1, REG_8); 2865 /* lmg %r7,%r8,r7_r8_off(%r15) */ 2866 EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15, 2867 tjit->r7_r8_off); 2868 /* lg %r14,r14_off(%r15) */ 2869 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off); 2870 /* lg %r2,retval_off(%r15) */ 2871 if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET)) 2872 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15, 2873 tjit->retval_off); 2874 /* mvc stack_size+tail_call_cnt(4,%r15),tccnt_off(%r15) */ 2875 _EMIT6(0xd203f000 | (tjit->stack_size + 2876 offsetof(struct prog_frame, tail_call_cnt)), 2877 0xf000 | tjit->tccnt_off); 2878 /* aghi %r15,stack_size */ 2879 EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size); 2880 if (flags & BPF_TRAMP_F_SKIP_FRAME) 2881 EMIT_JUMP_REG(14); 2882 else 2883 EMIT_JUMP_REG(1); 2884 2885 return 0; 2886 } 2887 2888 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, 2889 struct bpf_tramp_links *tlinks, void *orig_call) 2890 { 2891 struct bpf_tramp_image im; 2892 struct bpf_tramp_jit tjit; 2893 int ret; 2894 2895 memset(&tjit, 0, sizeof(tjit)); 2896 2897 ret = __arch_prepare_bpf_trampoline(&im, &tjit, m, flags, 2898 tlinks, orig_call); 2899 2900 return ret < 0 ? ret : tjit.common.prg; 2901 } 2902 2903 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, 2904 void *image_end, const struct btf_func_model *m, 2905 u32 flags, struct bpf_tramp_links *tlinks, 2906 void *func_addr) 2907 { 2908 struct bpf_tramp_jit tjit; 2909 int ret; 2910 2911 /* Compute offsets, check whether the code fits. */ 2912 memset(&tjit, 0, sizeof(tjit)); 2913 ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags, 2914 tlinks, func_addr); 2915 2916 if (ret < 0) 2917 return ret; 2918 if (tjit.common.prg > (char *)image_end - (char *)image) 2919 /* 2920 * Use the same error code as for exceeding 2921 * BPF_MAX_TRAMP_LINKS. 2922 */ 2923 return -E2BIG; 2924 2925 tjit.common.prg = 0; 2926 tjit.common.prg_buf = image; 2927 ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags, 2928 tlinks, func_addr); 2929 2930 return ret < 0 ? ret : tjit.common.prg; 2931 } 2932 2933 bool bpf_jit_supports_subprog_tailcalls(void) 2934 { 2935 return true; 2936 } 2937 2938 bool bpf_jit_supports_arena(void) 2939 { 2940 return true; 2941 } 2942 2943 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) 2944 { 2945 if (!in_arena) 2946 return true; 2947 switch (insn->code) { 2948 case BPF_STX | BPF_ATOMIC | BPF_B: 2949 case BPF_STX | BPF_ATOMIC | BPF_H: 2950 case BPF_STX | BPF_ATOMIC | BPF_W: 2951 case BPF_STX | BPF_ATOMIC | BPF_DW: 2952 if (bpf_atomic_is_load_store(insn)) 2953 return false; 2954 } 2955 return true; 2956 } 2957 2958 bool bpf_jit_supports_exceptions(void) 2959 { 2960 /* 2961 * Exceptions require unwinding support, which is always available, 2962 * because the kernel is always built with backchain. 2963 */ 2964 return true; 2965 } 2966 2967 void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64), 2968 void *cookie) 2969 { 2970 unsigned long addr, prev_addr = 0; 2971 struct unwind_state state; 2972 2973 unwind_for_each_frame(&state, NULL, NULL, 0) { 2974 addr = unwind_get_return_address(&state); 2975 if (!addr) 2976 break; 2977 /* 2978 * addr is a return address and state.sp is the value of %r15 2979 * at this address. exception_cb needs %r15 at entry to the 2980 * function containing addr, so take the next state.sp. 2981 * 2982 * There is no bp, and the exception_cb prog does not need one 2983 * to perform a quasi-longjmp. The common code requires a 2984 * non-zero bp, so pass sp there as well. 2985 */ 2986 if (prev_addr && !consume_fn(cookie, prev_addr, state.sp, 2987 state.sp)) 2988 break; 2989 prev_addr = addr; 2990 } 2991 } 2992