1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * BPF Jit compiler for s390. 4 * 5 * Minimum build requirements: 6 * 7 * - HAVE_MARCH_Z196_FEATURES: laal, laalg 8 * - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj 9 * - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf 10 * - 64BIT 11 * 12 * Copyright IBM Corp. 2012,2015 13 * 14 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 15 * Michael Holzheu <holzheu@linux.vnet.ibm.com> 16 */ 17 18 #define KMSG_COMPONENT "bpf_jit" 19 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 20 21 #include <linux/netdevice.h> 22 #include <linux/filter.h> 23 #include <linux/init.h> 24 #include <linux/bpf.h> 25 #include <linux/mm.h> 26 #include <linux/kernel.h> 27 #include <asm/cacheflush.h> 28 #include <asm/extable.h> 29 #include <asm/dis.h> 30 #include <asm/facility.h> 31 #include <asm/nospec-branch.h> 32 #include <asm/set_memory.h> 33 #include <asm/text-patching.h> 34 #include <asm/unwind.h> 35 #include "bpf_jit.h" 36 37 struct bpf_jit { 38 u32 seen; /* Flags to remember seen eBPF instructions */ 39 u16 seen_regs; /* Mask to remember which registers are used */ 40 u32 *addrs; /* Array with relative instruction addresses */ 41 u8 *prg_buf; /* Start of program */ 42 int size; /* Size of program and literal pool */ 43 int size_prg; /* Size of program */ 44 int prg; /* Current position in program */ 45 int lit32_start; /* Start of 32-bit literal pool */ 46 int lit32; /* Current position in 32-bit literal pool */ 47 int lit64_start; /* Start of 64-bit literal pool */ 48 int lit64; /* Current position in 64-bit literal pool */ 49 int base_ip; /* Base address for literal pool */ 50 int exit_ip; /* Address of exit */ 51 int tail_call_start; /* Tail call start offset */ 52 int excnt; /* Number of exception table entries */ 53 int prologue_plt_ret; /* Return address for prologue hotpatch PLT */ 54 int prologue_plt; /* Start of prologue hotpatch PLT */ 55 int kern_arena; /* Pool offset of kernel arena address */ 56 u64 user_arena; /* User arena address */ 57 }; 58 59 #define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ 60 #define SEEN_LITERAL BIT(1) /* code uses literals */ 61 #define SEEN_FUNC BIT(2) /* calls C functions */ 62 #define SEEN_STACK (SEEN_FUNC | SEEN_MEM) 63 64 #define NVREGS 0xffc0 /* %r6-%r15 */ 65 66 /* 67 * s390 registers 68 */ 69 #define REG_W0 (MAX_BPF_JIT_REG + 0) /* Work register 1 (even) */ 70 #define REG_W1 (MAX_BPF_JIT_REG + 1) /* Work register 2 (odd) */ 71 #define REG_L (MAX_BPF_JIT_REG + 2) /* Literal pool register */ 72 #define REG_15 (MAX_BPF_JIT_REG + 3) /* Register 15 */ 73 #define REG_0 REG_W0 /* Register 0 */ 74 #define REG_1 REG_W1 /* Register 1 */ 75 #define REG_2 BPF_REG_1 /* Register 2 */ 76 #define REG_3 BPF_REG_2 /* Register 3 */ 77 #define REG_4 BPF_REG_3 /* Register 4 */ 78 #define REG_7 BPF_REG_6 /* Register 7 */ 79 #define REG_8 BPF_REG_7 /* Register 8 */ 80 #define REG_14 BPF_REG_0 /* Register 14 */ 81 82 /* 83 * Mapping of BPF registers to s390 registers 84 */ 85 static const int reg2hex[] = { 86 /* Return code */ 87 [BPF_REG_0] = 14, 88 /* Function parameters */ 89 [BPF_REG_1] = 2, 90 [BPF_REG_2] = 3, 91 [BPF_REG_3] = 4, 92 [BPF_REG_4] = 5, 93 [BPF_REG_5] = 6, 94 /* Call saved registers */ 95 [BPF_REG_6] = 7, 96 [BPF_REG_7] = 8, 97 [BPF_REG_8] = 9, 98 [BPF_REG_9] = 10, 99 /* BPF stack pointer */ 100 [BPF_REG_FP] = 13, 101 /* Register for blinding */ 102 [BPF_REG_AX] = 12, 103 /* Work registers for s390x backend */ 104 [REG_W0] = 0, 105 [REG_W1] = 1, 106 [REG_L] = 11, 107 [REG_15] = 15, 108 }; 109 110 static inline u32 reg(u32 dst_reg, u32 src_reg) 111 { 112 return reg2hex[dst_reg] << 4 | reg2hex[src_reg]; 113 } 114 115 static inline u32 reg_high(u32 reg) 116 { 117 return reg2hex[reg] << 4; 118 } 119 120 static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) 121 { 122 u32 r1 = reg2hex[b1]; 123 124 if (r1 >= 6 && r1 <= 15) 125 jit->seen_regs |= (1 << r1); 126 } 127 128 static s32 off_to_pcrel(struct bpf_jit *jit, u32 off) 129 { 130 return off - jit->prg; 131 } 132 133 static s64 ptr_to_pcrel(struct bpf_jit *jit, const void *ptr) 134 { 135 if (jit->prg_buf) 136 return (const u8 *)ptr - ((const u8 *)jit->prg_buf + jit->prg); 137 return 0; 138 } 139 140 #define REG_SET_SEEN(b1) \ 141 ({ \ 142 reg_set_seen(jit, b1); \ 143 }) 144 145 /* 146 * EMIT macros for code generation 147 */ 148 149 #define _EMIT2(op) \ 150 ({ \ 151 if (jit->prg_buf) \ 152 *(u16 *) (jit->prg_buf + jit->prg) = (op); \ 153 jit->prg += 2; \ 154 }) 155 156 #define EMIT2(op, b1, b2) \ 157 ({ \ 158 _EMIT2((op) | reg(b1, b2)); \ 159 REG_SET_SEEN(b1); \ 160 REG_SET_SEEN(b2); \ 161 }) 162 163 #define _EMIT4(op) \ 164 ({ \ 165 if (jit->prg_buf) \ 166 *(u32 *) (jit->prg_buf + jit->prg) = (op); \ 167 jit->prg += 4; \ 168 }) 169 170 #define EMIT4(op, b1, b2) \ 171 ({ \ 172 _EMIT4((op) | reg(b1, b2)); \ 173 REG_SET_SEEN(b1); \ 174 REG_SET_SEEN(b2); \ 175 }) 176 177 #define EMIT4_RRF(op, b1, b2, b3) \ 178 ({ \ 179 _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \ 180 REG_SET_SEEN(b1); \ 181 REG_SET_SEEN(b2); \ 182 REG_SET_SEEN(b3); \ 183 }) 184 185 #define _EMIT4_DISP(op, disp) \ 186 ({ \ 187 unsigned int __disp = (disp) & 0xfff; \ 188 _EMIT4((op) | __disp); \ 189 }) 190 191 #define EMIT4_DISP(op, b1, b2, disp) \ 192 ({ \ 193 _EMIT4_DISP((op) | reg_high(b1) << 16 | \ 194 reg_high(b2) << 8, (disp)); \ 195 REG_SET_SEEN(b1); \ 196 REG_SET_SEEN(b2); \ 197 }) 198 199 #define EMIT4_IMM(op, b1, imm) \ 200 ({ \ 201 unsigned int __imm = (imm) & 0xffff; \ 202 _EMIT4((op) | reg_high(b1) << 16 | __imm); \ 203 REG_SET_SEEN(b1); \ 204 }) 205 206 #define EMIT4_PCREL(op, pcrel) \ 207 ({ \ 208 long __pcrel = ((pcrel) >> 1) & 0xffff; \ 209 _EMIT4((op) | __pcrel); \ 210 }) 211 212 #define EMIT4_PCREL_RIC(op, mask, target) \ 213 ({ \ 214 int __rel = off_to_pcrel(jit, target) / 2; \ 215 _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \ 216 }) 217 218 #define _EMIT6(op1, op2) \ 219 ({ \ 220 if (jit->prg_buf) { \ 221 *(u32 *) (jit->prg_buf + jit->prg) = (op1); \ 222 *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \ 223 } \ 224 jit->prg += 6; \ 225 }) 226 227 #define _EMIT6_DISP(op1, op2, disp) \ 228 ({ \ 229 unsigned int __disp = (disp) & 0xfff; \ 230 _EMIT6((op1) | __disp, op2); \ 231 }) 232 233 #define _EMIT6_DISP_LH(op1, op2, disp) \ 234 ({ \ 235 u32 _disp = (u32) (disp); \ 236 unsigned int __disp_h = _disp & 0xff000; \ 237 unsigned int __disp_l = _disp & 0x00fff; \ 238 _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \ 239 }) 240 241 #define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \ 242 ({ \ 243 _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \ 244 reg_high(b3) << 8, op2, disp); \ 245 REG_SET_SEEN(b1); \ 246 REG_SET_SEEN(b2); \ 247 REG_SET_SEEN(b3); \ 248 }) 249 250 #define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \ 251 ({ \ 252 unsigned int rel = off_to_pcrel(jit, target) / 2; \ 253 _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \ 254 (op2) | (mask) << 12); \ 255 REG_SET_SEEN(b1); \ 256 REG_SET_SEEN(b2); \ 257 }) 258 259 #define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \ 260 ({ \ 261 unsigned int rel = off_to_pcrel(jit, target) / 2; \ 262 _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \ 263 (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \ 264 REG_SET_SEEN(b1); \ 265 BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \ 266 }) 267 268 #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \ 269 ({ \ 270 int rel = off_to_pcrel(jit, addrs[(i) + (off) + 1]) / 2;\ 271 _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\ 272 REG_SET_SEEN(b1); \ 273 REG_SET_SEEN(b2); \ 274 }) 275 276 static void emit6_pcrel_ril(struct bpf_jit *jit, u32 op, s64 pcrel) 277 { 278 u32 pc32dbl = (s32)(pcrel / 2); 279 280 _EMIT6(op | pc32dbl >> 16, pc32dbl & 0xffff); 281 } 282 283 static void emit6_pcrel_rilb(struct bpf_jit *jit, u32 op, u8 b, s64 pcrel) 284 { 285 emit6_pcrel_ril(jit, op | reg_high(b) << 16, pcrel); 286 REG_SET_SEEN(b); 287 } 288 289 #define EMIT6_PCREL_RILB(op, b, target) \ 290 emit6_pcrel_rilb(jit, op, b, off_to_pcrel(jit, target)) 291 292 #define EMIT6_PCREL_RILB_PTR(op, b, target_ptr) \ 293 emit6_pcrel_rilb(jit, op, b, ptr_to_pcrel(jit, target_ptr)) 294 295 static void emit6_pcrel_rilc(struct bpf_jit *jit, u32 op, u8 mask, s64 pcrel) 296 { 297 emit6_pcrel_ril(jit, op | mask << 20, pcrel); 298 } 299 300 #define EMIT6_PCREL_RILC(op, mask, target) \ 301 emit6_pcrel_rilc(jit, op, mask, off_to_pcrel(jit, target)) 302 303 #define EMIT6_PCREL_RILC_PTR(op, mask, target_ptr) \ 304 emit6_pcrel_rilc(jit, op, mask, ptr_to_pcrel(jit, target_ptr)) 305 306 #define _EMIT6_IMM(op, imm) \ 307 ({ \ 308 unsigned int __imm = (imm); \ 309 _EMIT6((op) | (__imm >> 16), __imm & 0xffff); \ 310 }) 311 312 #define EMIT6_IMM(op, b1, imm) \ 313 ({ \ 314 _EMIT6_IMM((op) | reg_high(b1) << 16, imm); \ 315 REG_SET_SEEN(b1); \ 316 }) 317 318 #define _EMIT_CONST_U32(val) \ 319 ({ \ 320 unsigned int ret; \ 321 ret = jit->lit32; \ 322 if (jit->prg_buf) \ 323 *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\ 324 jit->lit32 += 4; \ 325 ret; \ 326 }) 327 328 #define EMIT_CONST_U32(val) \ 329 ({ \ 330 jit->seen |= SEEN_LITERAL; \ 331 _EMIT_CONST_U32(val) - jit->base_ip; \ 332 }) 333 334 #define _EMIT_CONST_U64(val) \ 335 ({ \ 336 unsigned int ret; \ 337 ret = jit->lit64; \ 338 if (jit->prg_buf) \ 339 *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\ 340 jit->lit64 += 8; \ 341 ret; \ 342 }) 343 344 #define EMIT_CONST_U64(val) \ 345 ({ \ 346 jit->seen |= SEEN_LITERAL; \ 347 _EMIT_CONST_U64(val) - jit->base_ip; \ 348 }) 349 350 #define EMIT_ZERO(b1) \ 351 ({ \ 352 if (!fp->aux->verifier_zext) { \ 353 /* llgfr %dst,%dst (zero extend to 64 bit) */ \ 354 EMIT4(0xb9160000, b1, b1); \ 355 REG_SET_SEEN(b1); \ 356 } \ 357 }) 358 359 /* 360 * Return whether this is the first pass. The first pass is special, since we 361 * don't know any sizes yet, and thus must be conservative. 362 */ 363 static bool is_first_pass(struct bpf_jit *jit) 364 { 365 return jit->size == 0; 366 } 367 368 /* 369 * Return whether this is the code generation pass. The code generation pass is 370 * special, since we should change as little as possible. 371 */ 372 static bool is_codegen_pass(struct bpf_jit *jit) 373 { 374 return jit->prg_buf; 375 } 376 377 /* 378 * Return whether "rel" can be encoded as a short PC-relative offset 379 */ 380 static bool is_valid_rel(int rel) 381 { 382 return rel >= -65536 && rel <= 65534; 383 } 384 385 /* 386 * Return whether "off" can be reached using a short PC-relative offset 387 */ 388 static bool can_use_rel(struct bpf_jit *jit, int off) 389 { 390 return is_valid_rel(off - jit->prg); 391 } 392 393 /* 394 * Return whether given displacement can be encoded using 395 * Long-Displacement Facility 396 */ 397 static bool is_valid_ldisp(int disp) 398 { 399 return disp >= -524288 && disp <= 524287; 400 } 401 402 /* 403 * Return whether the next 32-bit literal pool entry can be referenced using 404 * Long-Displacement Facility 405 */ 406 static bool can_use_ldisp_for_lit32(struct bpf_jit *jit) 407 { 408 return is_valid_ldisp(jit->lit32 - jit->base_ip); 409 } 410 411 /* 412 * Return whether the next 64-bit literal pool entry can be referenced using 413 * Long-Displacement Facility 414 */ 415 static bool can_use_ldisp_for_lit64(struct bpf_jit *jit) 416 { 417 return is_valid_ldisp(jit->lit64 - jit->base_ip); 418 } 419 420 /* 421 * Fill whole space with illegal instructions 422 */ 423 static void jit_fill_hole(void *area, unsigned int size) 424 { 425 memset(area, 0, size); 426 } 427 428 /* 429 * Save registers from "rs" (register start) to "re" (register end) on stack 430 */ 431 static void save_regs(struct bpf_jit *jit, u32 rs, u32 re) 432 { 433 u32 off = STK_OFF_R6 + (rs - 6) * 8; 434 435 if (rs == re) 436 /* stg %rs,off(%r15) */ 437 _EMIT6(0xe300f000 | rs << 20 | off, 0x0024); 438 else 439 /* stmg %rs,%re,off(%r15) */ 440 _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off); 441 } 442 443 /* 444 * Restore registers from "rs" (register start) to "re" (register end) on stack 445 */ 446 static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth) 447 { 448 u32 off = STK_OFF_R6 + (rs - 6) * 8; 449 450 if (jit->seen & SEEN_STACK) 451 off += STK_OFF + stack_depth; 452 453 if (rs == re) 454 /* lg %rs,off(%r15) */ 455 _EMIT6(0xe300f000 | rs << 20 | off, 0x0004); 456 else 457 /* lmg %rs,%re,off(%r15) */ 458 _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off); 459 } 460 461 /* 462 * Return first seen register (from start) 463 */ 464 static int get_start(u16 seen_regs, int start) 465 { 466 int i; 467 468 for (i = start; i <= 15; i++) { 469 if (seen_regs & (1 << i)) 470 return i; 471 } 472 return 0; 473 } 474 475 /* 476 * Return last seen register (from start) (gap >= 2) 477 */ 478 static int get_end(u16 seen_regs, int start) 479 { 480 int i; 481 482 for (i = start; i < 15; i++) { 483 if (!(seen_regs & (3 << i))) 484 return i - 1; 485 } 486 return (seen_regs & (1 << 15)) ? 15 : 14; 487 } 488 489 #define REGS_SAVE 1 490 #define REGS_RESTORE 0 491 /* 492 * Save and restore clobbered registers (6-15) on stack. 493 * We save/restore registers in chunks with gap >= 2 registers. 494 */ 495 static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth, 496 u16 extra_regs) 497 { 498 u16 seen_regs = jit->seen_regs | extra_regs; 499 const int last = 15, save_restore_size = 6; 500 int re = 6, rs; 501 502 if (is_first_pass(jit)) { 503 /* 504 * We don't know yet which registers are used. Reserve space 505 * conservatively. 506 */ 507 jit->prg += (last - re + 1) * save_restore_size; 508 return; 509 } 510 511 do { 512 rs = get_start(seen_regs, re); 513 if (!rs) 514 break; 515 re = get_end(seen_regs, rs + 1); 516 if (op == REGS_SAVE) 517 save_regs(jit, rs, re); 518 else 519 restore_regs(jit, rs, re, stack_depth); 520 re++; 521 } while (re <= last); 522 } 523 524 static void bpf_skip(struct bpf_jit *jit, int size) 525 { 526 if (size >= 6 && !is_valid_rel(size)) { 527 /* brcl 0xf,size */ 528 EMIT6_PCREL_RILC(0xc0040000, 0xf, size); 529 size -= 6; 530 } else if (size >= 4 && is_valid_rel(size)) { 531 /* brc 0xf,size */ 532 EMIT4_PCREL(0xa7f40000, size); 533 size -= 4; 534 } 535 while (size >= 2) { 536 /* bcr 0,%0 */ 537 _EMIT2(0x0700); 538 size -= 2; 539 } 540 } 541 542 /* 543 * PLT for hotpatchable calls. The calling convention is the same as for the 544 * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered. 545 */ 546 struct bpf_plt { 547 char code[16]; 548 void *ret; 549 void *target; 550 } __packed; 551 extern const struct bpf_plt bpf_plt; 552 asm( 553 ".pushsection .rodata\n" 554 " .balign 8\n" 555 "bpf_plt:\n" 556 " lgrl %r0,bpf_plt_ret\n" 557 " lgrl %r1,bpf_plt_target\n" 558 " br %r1\n" 559 " .balign 8\n" 560 "bpf_plt_ret: .quad 0\n" 561 "bpf_plt_target: .quad 0\n" 562 " .popsection\n" 563 ); 564 565 static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target) 566 { 567 memcpy(plt, &bpf_plt, sizeof(*plt)); 568 plt->ret = ret; 569 /* 570 * (target == NULL) implies that the branch to this PLT entry was 571 * patched and became a no-op. However, some CPU could have jumped 572 * to this PLT entry before patching and may be still executing it. 573 * 574 * Since the intention in this case is to make the PLT entry a no-op, 575 * make the target point to the return label instead of NULL. 576 */ 577 plt->target = target ?: ret; 578 } 579 580 /* 581 * Emit function prologue 582 * 583 * Save registers and create stack frame if necessary. 584 * See stack frame layout description in "bpf_jit.h"! 585 */ 586 static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, 587 u32 stack_depth) 588 { 589 /* No-op for hotpatching */ 590 /* brcl 0,prologue_plt */ 591 EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt); 592 jit->prologue_plt_ret = jit->prg; 593 594 if (!bpf_is_subprog(fp)) { 595 /* Initialize the tail call counter in the main program. */ 596 /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */ 597 _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT); 598 } else { 599 /* 600 * Skip the tail call counter initialization in subprograms. 601 * Insert nops in order to have tail_call_start at a 602 * predictable offset. 603 */ 604 bpf_skip(jit, 6); 605 } 606 /* Tail calls have to skip above initialization */ 607 jit->tail_call_start = jit->prg; 608 if (fp->aux->exception_cb) { 609 /* 610 * Switch stack, the new address is in the 2nd parameter. 611 * 612 * Arrange the restoration of %r6-%r15 in the epilogue. 613 * Do not restore them now, the prog does not need them. 614 */ 615 /* lgr %r15,%r3 */ 616 EMIT4(0xb9040000, REG_15, REG_3); 617 jit->seen_regs |= NVREGS; 618 } else { 619 /* Save registers */ 620 save_restore_regs(jit, REGS_SAVE, stack_depth, 621 fp->aux->exception_boundary ? NVREGS : 0); 622 } 623 /* Setup literal pool */ 624 if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) { 625 if (!is_first_pass(jit) && 626 is_valid_ldisp(jit->size - (jit->prg + 2))) { 627 /* basr %l,0 */ 628 EMIT2(0x0d00, REG_L, REG_0); 629 jit->base_ip = jit->prg; 630 } else { 631 /* larl %l,lit32_start */ 632 EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start); 633 jit->base_ip = jit->lit32_start; 634 } 635 } 636 /* Setup stack and backchain */ 637 if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) { 638 /* lgr %w1,%r15 (backchain) */ 639 EMIT4(0xb9040000, REG_W1, REG_15); 640 /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */ 641 EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED); 642 /* aghi %r15,-STK_OFF */ 643 EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth)); 644 /* stg %w1,152(%r15) (backchain) */ 645 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, 646 REG_15, 152); 647 } 648 } 649 650 /* 651 * Jump using a register either directly or via an expoline thunk 652 */ 653 #define EMIT_JUMP_REG(reg) do { \ 654 if (nospec_uses_trampoline()) \ 655 /* brcl 0xf,__s390_indirect_jump_rN */ \ 656 EMIT6_PCREL_RILC_PTR(0xc0040000, 0x0f, \ 657 __s390_indirect_jump_r ## reg); \ 658 else \ 659 /* br %rN */ \ 660 _EMIT2(0x07f0 | reg); \ 661 } while (0) 662 663 /* 664 * Call r1 either directly or via __s390_indirect_jump_r1 thunk 665 */ 666 static void call_r1(struct bpf_jit *jit) 667 { 668 if (nospec_uses_trampoline()) 669 /* brasl %r14,__s390_indirect_jump_r1 */ 670 EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, 671 __s390_indirect_jump_r1); 672 else 673 /* basr %r14,%r1 */ 674 EMIT2(0x0d00, REG_14, REG_1); 675 } 676 677 /* 678 * Function epilogue 679 */ 680 static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) 681 { 682 jit->exit_ip = jit->prg; 683 /* Load exit code: lgr %r2,%b0 */ 684 EMIT4(0xb9040000, REG_2, BPF_REG_0); 685 /* Restore registers */ 686 save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); 687 EMIT_JUMP_REG(14); 688 689 jit->prg = ALIGN(jit->prg, 8); 690 jit->prologue_plt = jit->prg; 691 if (jit->prg_buf) 692 bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg), 693 jit->prg_buf + jit->prologue_plt_ret, NULL); 694 jit->prg += sizeof(struct bpf_plt); 695 } 696 697 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) 698 { 699 regs->psw.addr = extable_fixup(x); 700 if (x->data != -1) 701 regs->gprs[x->data] = 0; 702 return true; 703 } 704 705 /* 706 * A single BPF probe instruction 707 */ 708 struct bpf_jit_probe { 709 int prg; /* JITed instruction offset */ 710 int nop_prg; /* JITed nop offset */ 711 int reg; /* Register to clear on exception */ 712 int arena_reg; /* Register to use for arena addressing */ 713 }; 714 715 static void bpf_jit_probe_init(struct bpf_jit_probe *probe) 716 { 717 probe->prg = -1; 718 probe->nop_prg = -1; 719 probe->reg = -1; 720 probe->arena_reg = REG_0; 721 } 722 723 /* 724 * Handlers of certain exceptions leave psw.addr pointing to the instruction 725 * directly after the failing one. Therefore, create two exception table 726 * entries and also add a nop in case two probing instructions come directly 727 * after each other. 728 */ 729 static void bpf_jit_probe_emit_nop(struct bpf_jit *jit, 730 struct bpf_jit_probe *probe) 731 { 732 if (probe->prg == -1 || probe->nop_prg != -1) 733 /* The probe is not armed or nop is already emitted. */ 734 return; 735 736 probe->nop_prg = jit->prg; 737 /* bcr 0,%0 */ 738 _EMIT2(0x0700); 739 } 740 741 static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn, 742 struct bpf_jit_probe *probe) 743 { 744 if (BPF_MODE(insn->code) != BPF_PROBE_MEM && 745 BPF_MODE(insn->code) != BPF_PROBE_MEMSX && 746 BPF_MODE(insn->code) != BPF_PROBE_MEM32) 747 return; 748 749 if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { 750 /* lgrl %r1,kern_arena */ 751 EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); 752 probe->arena_reg = REG_W1; 753 } 754 probe->prg = jit->prg; 755 probe->reg = reg2hex[insn->dst_reg]; 756 } 757 758 static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn, 759 struct bpf_jit_probe *probe) 760 { 761 if (BPF_MODE(insn->code) != BPF_PROBE_MEM32) 762 return; 763 764 /* lgrl %r1,kern_arena */ 765 EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); 766 probe->arena_reg = REG_W1; 767 probe->prg = jit->prg; 768 } 769 770 static void bpf_jit_probe_atomic_pre(struct bpf_jit *jit, 771 struct bpf_insn *insn, 772 struct bpf_jit_probe *probe) 773 { 774 if (BPF_MODE(insn->code) != BPF_PROBE_ATOMIC) 775 return; 776 777 /* lgrl %r1,kern_arena */ 778 EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); 779 /* agr %r1,%dst */ 780 EMIT4(0xb9080000, REG_W1, insn->dst_reg); 781 probe->arena_reg = REG_W1; 782 probe->prg = jit->prg; 783 } 784 785 static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp, 786 struct bpf_jit_probe *probe) 787 { 788 struct exception_table_entry *ex; 789 int i, prg; 790 s64 delta; 791 u8 *insn; 792 793 if (probe->prg == -1) 794 /* The probe is not armed. */ 795 return 0; 796 bpf_jit_probe_emit_nop(jit, probe); 797 if (!fp->aux->extable) 798 /* Do nothing during early JIT passes. */ 799 return 0; 800 insn = jit->prg_buf + probe->prg; 801 if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg)) 802 /* JIT bug - gap between probe and nop instructions. */ 803 return -1; 804 for (i = 0; i < 2; i++) { 805 if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries)) 806 /* Verifier bug - not enough entries. */ 807 return -1; 808 ex = &fp->aux->extable[jit->excnt]; 809 /* Add extable entries for probe and nop instructions. */ 810 prg = i == 0 ? probe->prg : probe->nop_prg; 811 delta = jit->prg_buf + prg - (u8 *)&ex->insn; 812 if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX)) 813 /* JIT bug - code and extable must be close. */ 814 return -1; 815 ex->insn = delta; 816 /* 817 * Land on the current instruction. Note that the extable 818 * infrastructure ignores the fixup field; it is handled by 819 * ex_handler_bpf(). 820 */ 821 delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup; 822 if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX)) 823 /* JIT bug - landing pad and extable must be close. */ 824 return -1; 825 ex->fixup = delta; 826 ex->type = EX_TYPE_BPF; 827 ex->data = probe->reg; 828 jit->excnt++; 829 } 830 return 0; 831 } 832 833 /* 834 * Sign-extend the register if necessary 835 */ 836 static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags) 837 { 838 if (!(flags & BTF_FMODEL_SIGNED_ARG)) 839 return 0; 840 841 switch (size) { 842 case 1: 843 /* lgbr %r,%r */ 844 EMIT4(0xb9060000, r, r); 845 return 0; 846 case 2: 847 /* lghr %r,%r */ 848 EMIT4(0xb9070000, r, r); 849 return 0; 850 case 4: 851 /* lgfr %r,%r */ 852 EMIT4(0xb9140000, r, r); 853 return 0; 854 case 8: 855 return 0; 856 default: 857 return -1; 858 } 859 } 860 861 /* 862 * Compile one eBPF instruction into s390x code 863 * 864 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of 865 * stack space for the large switch statement. 866 */ 867 static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, 868 int i, bool extra_pass, u32 stack_depth) 869 { 870 struct bpf_insn *insn = &fp->insnsi[i]; 871 s32 branch_oc_off = insn->off; 872 u32 dst_reg = insn->dst_reg; 873 u32 src_reg = insn->src_reg; 874 struct bpf_jit_probe probe; 875 int last, insn_count = 1; 876 u32 *addrs = jit->addrs; 877 s32 imm = insn->imm; 878 s16 off = insn->off; 879 unsigned int mask; 880 int err; 881 882 bpf_jit_probe_init(&probe); 883 884 switch (insn->code) { 885 /* 886 * BPF_MOV 887 */ 888 case BPF_ALU | BPF_MOV | BPF_X: 889 switch (insn->off) { 890 case 0: /* DST = (u32) SRC */ 891 /* llgfr %dst,%src */ 892 EMIT4(0xb9160000, dst_reg, src_reg); 893 if (insn_is_zext(&insn[1])) 894 insn_count = 2; 895 break; 896 case 8: /* DST = (u32)(s8) SRC */ 897 /* lbr %dst,%src */ 898 EMIT4(0xb9260000, dst_reg, src_reg); 899 /* llgfr %dst,%dst */ 900 EMIT4(0xb9160000, dst_reg, dst_reg); 901 break; 902 case 16: /* DST = (u32)(s16) SRC */ 903 /* lhr %dst,%src */ 904 EMIT4(0xb9270000, dst_reg, src_reg); 905 /* llgfr %dst,%dst */ 906 EMIT4(0xb9160000, dst_reg, dst_reg); 907 break; 908 } 909 break; 910 case BPF_ALU64 | BPF_MOV | BPF_X: 911 if (insn_is_cast_user(insn)) { 912 int patch_brc; 913 914 /* ltgr %dst,%src */ 915 EMIT4(0xb9020000, dst_reg, src_reg); 916 /* brc 8,0f */ 917 patch_brc = jit->prg; 918 EMIT4_PCREL_RIC(0xa7040000, 8, 0); 919 /* iihf %dst,user_arena>>32 */ 920 EMIT6_IMM(0xc0080000, dst_reg, jit->user_arena >> 32); 921 /* 0: */ 922 if (jit->prg_buf) 923 *(u16 *)(jit->prg_buf + patch_brc + 2) = 924 (jit->prg - patch_brc) >> 1; 925 break; 926 } 927 switch (insn->off) { 928 case 0: /* DST = SRC */ 929 /* lgr %dst,%src */ 930 EMIT4(0xb9040000, dst_reg, src_reg); 931 break; 932 case 8: /* DST = (s8) SRC */ 933 /* lgbr %dst,%src */ 934 EMIT4(0xb9060000, dst_reg, src_reg); 935 break; 936 case 16: /* DST = (s16) SRC */ 937 /* lghr %dst,%src */ 938 EMIT4(0xb9070000, dst_reg, src_reg); 939 break; 940 case 32: /* DST = (s32) SRC */ 941 /* lgfr %dst,%src */ 942 EMIT4(0xb9140000, dst_reg, src_reg); 943 break; 944 } 945 break; 946 case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */ 947 /* llilf %dst,imm */ 948 EMIT6_IMM(0xc00f0000, dst_reg, imm); 949 if (insn_is_zext(&insn[1])) 950 insn_count = 2; 951 break; 952 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */ 953 /* lgfi %dst,imm */ 954 EMIT6_IMM(0xc0010000, dst_reg, imm); 955 break; 956 /* 957 * BPF_LD 64 958 */ 959 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ 960 { 961 /* 16 byte instruction that uses two 'struct bpf_insn' */ 962 u64 imm64; 963 964 imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32; 965 /* lgrl %dst,imm */ 966 EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64)); 967 insn_count = 2; 968 break; 969 } 970 /* 971 * BPF_ADD 972 */ 973 case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */ 974 /* ar %dst,%src */ 975 EMIT2(0x1a00, dst_reg, src_reg); 976 EMIT_ZERO(dst_reg); 977 break; 978 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */ 979 /* agr %dst,%src */ 980 EMIT4(0xb9080000, dst_reg, src_reg); 981 break; 982 case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */ 983 if (imm != 0) { 984 /* alfi %dst,imm */ 985 EMIT6_IMM(0xc20b0000, dst_reg, imm); 986 } 987 EMIT_ZERO(dst_reg); 988 break; 989 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */ 990 if (!imm) 991 break; 992 /* agfi %dst,imm */ 993 EMIT6_IMM(0xc2080000, dst_reg, imm); 994 break; 995 /* 996 * BPF_SUB 997 */ 998 case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */ 999 /* sr %dst,%src */ 1000 EMIT2(0x1b00, dst_reg, src_reg); 1001 EMIT_ZERO(dst_reg); 1002 break; 1003 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */ 1004 /* sgr %dst,%src */ 1005 EMIT4(0xb9090000, dst_reg, src_reg); 1006 break; 1007 case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */ 1008 if (imm != 0) { 1009 /* alfi %dst,-imm */ 1010 EMIT6_IMM(0xc20b0000, dst_reg, -imm); 1011 } 1012 EMIT_ZERO(dst_reg); 1013 break; 1014 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */ 1015 if (!imm) 1016 break; 1017 if (imm == -0x80000000) { 1018 /* algfi %dst,0x80000000 */ 1019 EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000); 1020 } else { 1021 /* agfi %dst,-imm */ 1022 EMIT6_IMM(0xc2080000, dst_reg, -imm); 1023 } 1024 break; 1025 /* 1026 * BPF_MUL 1027 */ 1028 case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */ 1029 /* msr %dst,%src */ 1030 EMIT4(0xb2520000, dst_reg, src_reg); 1031 EMIT_ZERO(dst_reg); 1032 break; 1033 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */ 1034 /* msgr %dst,%src */ 1035 EMIT4(0xb90c0000, dst_reg, src_reg); 1036 break; 1037 case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */ 1038 if (imm != 1) { 1039 /* msfi %r5,imm */ 1040 EMIT6_IMM(0xc2010000, dst_reg, imm); 1041 } 1042 EMIT_ZERO(dst_reg); 1043 break; 1044 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */ 1045 if (imm == 1) 1046 break; 1047 /* msgfi %dst,imm */ 1048 EMIT6_IMM(0xc2000000, dst_reg, imm); 1049 break; 1050 /* 1051 * BPF_DIV / BPF_MOD 1052 */ 1053 case BPF_ALU | BPF_DIV | BPF_X: 1054 case BPF_ALU | BPF_MOD | BPF_X: 1055 { 1056 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 1057 1058 switch (off) { 1059 case 0: /* dst = (u32) dst {/,%} (u32) src */ 1060 /* xr %w0,%w0 */ 1061 EMIT2(0x1700, REG_W0, REG_W0); 1062 /* lr %w1,%dst */ 1063 EMIT2(0x1800, REG_W1, dst_reg); 1064 /* dlr %w0,%src */ 1065 EMIT4(0xb9970000, REG_W0, src_reg); 1066 break; 1067 case 1: /* dst = (u32) ((s32) dst {/,%} (s32) src) */ 1068 /* lgfr %r1,%dst */ 1069 EMIT4(0xb9140000, REG_W1, dst_reg); 1070 /* dsgfr %r0,%src */ 1071 EMIT4(0xb91d0000, REG_W0, src_reg); 1072 break; 1073 } 1074 /* llgfr %dst,%rc */ 1075 EMIT4(0xb9160000, dst_reg, rc_reg); 1076 if (insn_is_zext(&insn[1])) 1077 insn_count = 2; 1078 break; 1079 } 1080 case BPF_ALU64 | BPF_DIV | BPF_X: 1081 case BPF_ALU64 | BPF_MOD | BPF_X: 1082 { 1083 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 1084 1085 switch (off) { 1086 case 0: /* dst = dst {/,%} src */ 1087 /* lghi %w0,0 */ 1088 EMIT4_IMM(0xa7090000, REG_W0, 0); 1089 /* lgr %w1,%dst */ 1090 EMIT4(0xb9040000, REG_W1, dst_reg); 1091 /* dlgr %w0,%src */ 1092 EMIT4(0xb9870000, REG_W0, src_reg); 1093 break; 1094 case 1: /* dst = (s64) dst {/,%} (s64) src */ 1095 /* lgr %w1,%dst */ 1096 EMIT4(0xb9040000, REG_W1, dst_reg); 1097 /* dsgr %w0,%src */ 1098 EMIT4(0xb90d0000, REG_W0, src_reg); 1099 break; 1100 } 1101 /* lgr %dst,%rc */ 1102 EMIT4(0xb9040000, dst_reg, rc_reg); 1103 break; 1104 } 1105 case BPF_ALU | BPF_DIV | BPF_K: 1106 case BPF_ALU | BPF_MOD | BPF_K: 1107 { 1108 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 1109 1110 if (imm == 1) { 1111 if (BPF_OP(insn->code) == BPF_MOD) 1112 /* lghi %dst,0 */ 1113 EMIT4_IMM(0xa7090000, dst_reg, 0); 1114 else 1115 EMIT_ZERO(dst_reg); 1116 break; 1117 } 1118 if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) { 1119 switch (off) { 1120 case 0: /* dst = (u32) dst {/,%} (u32) imm */ 1121 /* xr %w0,%w0 */ 1122 EMIT2(0x1700, REG_W0, REG_W0); 1123 /* lr %w1,%dst */ 1124 EMIT2(0x1800, REG_W1, dst_reg); 1125 /* dl %w0,<d(imm)>(%l) */ 1126 EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, 1127 REG_L, EMIT_CONST_U32(imm)); 1128 break; 1129 case 1: /* dst = (s32) dst {/,%} (s32) imm */ 1130 /* lgfr %r1,%dst */ 1131 EMIT4(0xb9140000, REG_W1, dst_reg); 1132 /* dsgf %r0,<d(imm)>(%l) */ 1133 EMIT6_DISP_LH(0xe3000000, 0x001d, REG_W0, REG_0, 1134 REG_L, EMIT_CONST_U32(imm)); 1135 break; 1136 } 1137 } else { 1138 switch (off) { 1139 case 0: /* dst = (u32) dst {/,%} (u32) imm */ 1140 /* xr %w0,%w0 */ 1141 EMIT2(0x1700, REG_W0, REG_W0); 1142 /* lr %w1,%dst */ 1143 EMIT2(0x1800, REG_W1, dst_reg); 1144 /* lrl %dst,imm */ 1145 EMIT6_PCREL_RILB(0xc40d0000, dst_reg, 1146 _EMIT_CONST_U32(imm)); 1147 jit->seen |= SEEN_LITERAL; 1148 /* dlr %w0,%dst */ 1149 EMIT4(0xb9970000, REG_W0, dst_reg); 1150 break; 1151 case 1: /* dst = (s32) dst {/,%} (s32) imm */ 1152 /* lgfr %w1,%dst */ 1153 EMIT4(0xb9140000, REG_W1, dst_reg); 1154 /* lgfrl %dst,imm */ 1155 EMIT6_PCREL_RILB(0xc40c0000, dst_reg, 1156 _EMIT_CONST_U32(imm)); 1157 jit->seen |= SEEN_LITERAL; 1158 /* dsgr %w0,%dst */ 1159 EMIT4(0xb90d0000, REG_W0, dst_reg); 1160 break; 1161 } 1162 } 1163 /* llgfr %dst,%rc */ 1164 EMIT4(0xb9160000, dst_reg, rc_reg); 1165 if (insn_is_zext(&insn[1])) 1166 insn_count = 2; 1167 break; 1168 } 1169 case BPF_ALU64 | BPF_DIV | BPF_K: 1170 case BPF_ALU64 | BPF_MOD | BPF_K: 1171 { 1172 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 1173 1174 if (imm == 1) { 1175 if (BPF_OP(insn->code) == BPF_MOD) 1176 /* lhgi %dst,0 */ 1177 EMIT4_IMM(0xa7090000, dst_reg, 0); 1178 break; 1179 } 1180 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { 1181 switch (off) { 1182 case 0: /* dst = dst {/,%} imm */ 1183 /* lghi %w0,0 */ 1184 EMIT4_IMM(0xa7090000, REG_W0, 0); 1185 /* lgr %w1,%dst */ 1186 EMIT4(0xb9040000, REG_W1, dst_reg); 1187 /* dlg %w0,<d(imm)>(%l) */ 1188 EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, 1189 REG_L, EMIT_CONST_U64(imm)); 1190 break; 1191 case 1: /* dst = (s64) dst {/,%} (s64) imm */ 1192 /* lgr %w1,%dst */ 1193 EMIT4(0xb9040000, REG_W1, dst_reg); 1194 /* dsg %w0,<d(imm)>(%l) */ 1195 EMIT6_DISP_LH(0xe3000000, 0x000d, REG_W0, REG_0, 1196 REG_L, EMIT_CONST_U64(imm)); 1197 break; 1198 } 1199 } else { 1200 switch (off) { 1201 case 0: /* dst = dst {/,%} imm */ 1202 /* lghi %w0,0 */ 1203 EMIT4_IMM(0xa7090000, REG_W0, 0); 1204 /* lgr %w1,%dst */ 1205 EMIT4(0xb9040000, REG_W1, dst_reg); 1206 /* lgrl %dst,imm */ 1207 EMIT6_PCREL_RILB(0xc4080000, dst_reg, 1208 _EMIT_CONST_U64(imm)); 1209 jit->seen |= SEEN_LITERAL; 1210 /* dlgr %w0,%dst */ 1211 EMIT4(0xb9870000, REG_W0, dst_reg); 1212 break; 1213 case 1: /* dst = (s64) dst {/,%} (s64) imm */ 1214 /* lgr %w1,%dst */ 1215 EMIT4(0xb9040000, REG_W1, dst_reg); 1216 /* lgrl %dst,imm */ 1217 EMIT6_PCREL_RILB(0xc4080000, dst_reg, 1218 _EMIT_CONST_U64(imm)); 1219 jit->seen |= SEEN_LITERAL; 1220 /* dsgr %w0,%dst */ 1221 EMIT4(0xb90d0000, REG_W0, dst_reg); 1222 break; 1223 } 1224 } 1225 /* lgr %dst,%rc */ 1226 EMIT4(0xb9040000, dst_reg, rc_reg); 1227 break; 1228 } 1229 /* 1230 * BPF_AND 1231 */ 1232 case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */ 1233 /* nr %dst,%src */ 1234 EMIT2(0x1400, dst_reg, src_reg); 1235 EMIT_ZERO(dst_reg); 1236 break; 1237 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */ 1238 /* ngr %dst,%src */ 1239 EMIT4(0xb9800000, dst_reg, src_reg); 1240 break; 1241 case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */ 1242 /* nilf %dst,imm */ 1243 EMIT6_IMM(0xc00b0000, dst_reg, imm); 1244 EMIT_ZERO(dst_reg); 1245 break; 1246 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */ 1247 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { 1248 /* ng %dst,<d(imm)>(%l) */ 1249 EMIT6_DISP_LH(0xe3000000, 0x0080, 1250 dst_reg, REG_0, REG_L, 1251 EMIT_CONST_U64(imm)); 1252 } else { 1253 /* lgrl %w0,imm */ 1254 EMIT6_PCREL_RILB(0xc4080000, REG_W0, 1255 _EMIT_CONST_U64(imm)); 1256 jit->seen |= SEEN_LITERAL; 1257 /* ngr %dst,%w0 */ 1258 EMIT4(0xb9800000, dst_reg, REG_W0); 1259 } 1260 break; 1261 /* 1262 * BPF_OR 1263 */ 1264 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */ 1265 /* or %dst,%src */ 1266 EMIT2(0x1600, dst_reg, src_reg); 1267 EMIT_ZERO(dst_reg); 1268 break; 1269 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */ 1270 /* ogr %dst,%src */ 1271 EMIT4(0xb9810000, dst_reg, src_reg); 1272 break; 1273 case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */ 1274 /* oilf %dst,imm */ 1275 EMIT6_IMM(0xc00d0000, dst_reg, imm); 1276 EMIT_ZERO(dst_reg); 1277 break; 1278 case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */ 1279 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { 1280 /* og %dst,<d(imm)>(%l) */ 1281 EMIT6_DISP_LH(0xe3000000, 0x0081, 1282 dst_reg, REG_0, REG_L, 1283 EMIT_CONST_U64(imm)); 1284 } else { 1285 /* lgrl %w0,imm */ 1286 EMIT6_PCREL_RILB(0xc4080000, REG_W0, 1287 _EMIT_CONST_U64(imm)); 1288 jit->seen |= SEEN_LITERAL; 1289 /* ogr %dst,%w0 */ 1290 EMIT4(0xb9810000, dst_reg, REG_W0); 1291 } 1292 break; 1293 /* 1294 * BPF_XOR 1295 */ 1296 case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */ 1297 /* xr %dst,%src */ 1298 EMIT2(0x1700, dst_reg, src_reg); 1299 EMIT_ZERO(dst_reg); 1300 break; 1301 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */ 1302 /* xgr %dst,%src */ 1303 EMIT4(0xb9820000, dst_reg, src_reg); 1304 break; 1305 case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */ 1306 if (imm != 0) { 1307 /* xilf %dst,imm */ 1308 EMIT6_IMM(0xc0070000, dst_reg, imm); 1309 } 1310 EMIT_ZERO(dst_reg); 1311 break; 1312 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */ 1313 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { 1314 /* xg %dst,<d(imm)>(%l) */ 1315 EMIT6_DISP_LH(0xe3000000, 0x0082, 1316 dst_reg, REG_0, REG_L, 1317 EMIT_CONST_U64(imm)); 1318 } else { 1319 /* lgrl %w0,imm */ 1320 EMIT6_PCREL_RILB(0xc4080000, REG_W0, 1321 _EMIT_CONST_U64(imm)); 1322 jit->seen |= SEEN_LITERAL; 1323 /* xgr %dst,%w0 */ 1324 EMIT4(0xb9820000, dst_reg, REG_W0); 1325 } 1326 break; 1327 /* 1328 * BPF_LSH 1329 */ 1330 case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */ 1331 /* sll %dst,0(%src) */ 1332 EMIT4_DISP(0x89000000, dst_reg, src_reg, 0); 1333 EMIT_ZERO(dst_reg); 1334 break; 1335 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */ 1336 /* sllg %dst,%dst,0(%src) */ 1337 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0); 1338 break; 1339 case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */ 1340 if (imm != 0) { 1341 /* sll %dst,imm(%r0) */ 1342 EMIT4_DISP(0x89000000, dst_reg, REG_0, imm); 1343 } 1344 EMIT_ZERO(dst_reg); 1345 break; 1346 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */ 1347 if (imm == 0) 1348 break; 1349 /* sllg %dst,%dst,imm(%r0) */ 1350 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm); 1351 break; 1352 /* 1353 * BPF_RSH 1354 */ 1355 case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */ 1356 /* srl %dst,0(%src) */ 1357 EMIT4_DISP(0x88000000, dst_reg, src_reg, 0); 1358 EMIT_ZERO(dst_reg); 1359 break; 1360 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */ 1361 /* srlg %dst,%dst,0(%src) */ 1362 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0); 1363 break; 1364 case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */ 1365 if (imm != 0) { 1366 /* srl %dst,imm(%r0) */ 1367 EMIT4_DISP(0x88000000, dst_reg, REG_0, imm); 1368 } 1369 EMIT_ZERO(dst_reg); 1370 break; 1371 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */ 1372 if (imm == 0) 1373 break; 1374 /* srlg %dst,%dst,imm(%r0) */ 1375 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm); 1376 break; 1377 /* 1378 * BPF_ARSH 1379 */ 1380 case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */ 1381 /* sra %dst,%dst,0(%src) */ 1382 EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0); 1383 EMIT_ZERO(dst_reg); 1384 break; 1385 case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */ 1386 /* srag %dst,%dst,0(%src) */ 1387 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0); 1388 break; 1389 case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */ 1390 if (imm != 0) { 1391 /* sra %dst,imm(%r0) */ 1392 EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm); 1393 } 1394 EMIT_ZERO(dst_reg); 1395 break; 1396 case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */ 1397 if (imm == 0) 1398 break; 1399 /* srag %dst,%dst,imm(%r0) */ 1400 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm); 1401 break; 1402 /* 1403 * BPF_NEG 1404 */ 1405 case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */ 1406 /* lcr %dst,%dst */ 1407 EMIT2(0x1300, dst_reg, dst_reg); 1408 EMIT_ZERO(dst_reg); 1409 break; 1410 case BPF_ALU64 | BPF_NEG: /* dst = -dst */ 1411 /* lcgr %dst,%dst */ 1412 EMIT4(0xb9030000, dst_reg, dst_reg); 1413 break; 1414 /* 1415 * BPF_FROM_BE/LE 1416 */ 1417 case BPF_ALU | BPF_END | BPF_FROM_BE: 1418 /* s390 is big endian, therefore only clear high order bytes */ 1419 switch (imm) { 1420 case 16: /* dst = (u16) cpu_to_be16(dst) */ 1421 /* llghr %dst,%dst */ 1422 EMIT4(0xb9850000, dst_reg, dst_reg); 1423 if (insn_is_zext(&insn[1])) 1424 insn_count = 2; 1425 break; 1426 case 32: /* dst = (u32) cpu_to_be32(dst) */ 1427 if (!fp->aux->verifier_zext) 1428 /* llgfr %dst,%dst */ 1429 EMIT4(0xb9160000, dst_reg, dst_reg); 1430 break; 1431 case 64: /* dst = (u64) cpu_to_be64(dst) */ 1432 break; 1433 } 1434 break; 1435 case BPF_ALU | BPF_END | BPF_FROM_LE: 1436 case BPF_ALU64 | BPF_END | BPF_FROM_LE: 1437 switch (imm) { 1438 case 16: /* dst = (u16) cpu_to_le16(dst) */ 1439 /* lrvr %dst,%dst */ 1440 EMIT4(0xb91f0000, dst_reg, dst_reg); 1441 /* srl %dst,16(%r0) */ 1442 EMIT4_DISP(0x88000000, dst_reg, REG_0, 16); 1443 /* llghr %dst,%dst */ 1444 EMIT4(0xb9850000, dst_reg, dst_reg); 1445 if (insn_is_zext(&insn[1])) 1446 insn_count = 2; 1447 break; 1448 case 32: /* dst = (u32) cpu_to_le32(dst) */ 1449 /* lrvr %dst,%dst */ 1450 EMIT4(0xb91f0000, dst_reg, dst_reg); 1451 if (!fp->aux->verifier_zext) 1452 /* llgfr %dst,%dst */ 1453 EMIT4(0xb9160000, dst_reg, dst_reg); 1454 break; 1455 case 64: /* dst = (u64) cpu_to_le64(dst) */ 1456 /* lrvgr %dst,%dst */ 1457 EMIT4(0xb90f0000, dst_reg, dst_reg); 1458 break; 1459 } 1460 break; 1461 /* 1462 * BPF_NOSPEC (speculation barrier) 1463 */ 1464 case BPF_ST | BPF_NOSPEC: 1465 break; 1466 /* 1467 * BPF_ST(X) 1468 */ 1469 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */ 1470 case BPF_STX | BPF_PROBE_MEM32 | BPF_B: 1471 bpf_jit_probe_store_pre(jit, insn, &probe); 1472 /* stcy %src,off(%dst,%arena) */ 1473 EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, 1474 probe.arena_reg, off); 1475 err = bpf_jit_probe_post(jit, fp, &probe); 1476 if (err < 0) 1477 return err; 1478 jit->seen |= SEEN_MEM; 1479 break; 1480 case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */ 1481 case BPF_STX | BPF_PROBE_MEM32 | BPF_H: 1482 bpf_jit_probe_store_pre(jit, insn, &probe); 1483 /* sthy %src,off(%dst,%arena) */ 1484 EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, 1485 probe.arena_reg, off); 1486 err = bpf_jit_probe_post(jit, fp, &probe); 1487 if (err < 0) 1488 return err; 1489 jit->seen |= SEEN_MEM; 1490 break; 1491 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */ 1492 case BPF_STX | BPF_PROBE_MEM32 | BPF_W: 1493 bpf_jit_probe_store_pre(jit, insn, &probe); 1494 /* sty %src,off(%dst,%arena) */ 1495 EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, 1496 probe.arena_reg, off); 1497 err = bpf_jit_probe_post(jit, fp, &probe); 1498 if (err < 0) 1499 return err; 1500 jit->seen |= SEEN_MEM; 1501 break; 1502 case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */ 1503 case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: 1504 bpf_jit_probe_store_pre(jit, insn, &probe); 1505 /* stg %src,off(%dst,%arena) */ 1506 EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, 1507 probe.arena_reg, off); 1508 err = bpf_jit_probe_post(jit, fp, &probe); 1509 if (err < 0) 1510 return err; 1511 jit->seen |= SEEN_MEM; 1512 break; 1513 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */ 1514 case BPF_ST | BPF_PROBE_MEM32 | BPF_B: 1515 /* lhi %w0,imm */ 1516 EMIT4_IMM(0xa7080000, REG_W0, (u8) imm); 1517 bpf_jit_probe_store_pre(jit, insn, &probe); 1518 /* stcy %w0,off(%dst,%arena) */ 1519 EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, 1520 probe.arena_reg, off); 1521 err = bpf_jit_probe_post(jit, fp, &probe); 1522 if (err < 0) 1523 return err; 1524 jit->seen |= SEEN_MEM; 1525 break; 1526 case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */ 1527 case BPF_ST | BPF_PROBE_MEM32 | BPF_H: 1528 /* lhi %w0,imm */ 1529 EMIT4_IMM(0xa7080000, REG_W0, (u16) imm); 1530 bpf_jit_probe_store_pre(jit, insn, &probe); 1531 /* sthy %w0,off(%dst,%arena) */ 1532 EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, 1533 probe.arena_reg, off); 1534 err = bpf_jit_probe_post(jit, fp, &probe); 1535 if (err < 0) 1536 return err; 1537 jit->seen |= SEEN_MEM; 1538 break; 1539 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */ 1540 case BPF_ST | BPF_PROBE_MEM32 | BPF_W: 1541 /* llilf %w0,imm */ 1542 EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm); 1543 bpf_jit_probe_store_pre(jit, insn, &probe); 1544 /* sty %w0,off(%dst,%arena) */ 1545 EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, 1546 probe.arena_reg, off); 1547 err = bpf_jit_probe_post(jit, fp, &probe); 1548 if (err < 0) 1549 return err; 1550 jit->seen |= SEEN_MEM; 1551 break; 1552 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */ 1553 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: 1554 /* lgfi %w0,imm */ 1555 EMIT6_IMM(0xc0010000, REG_W0, imm); 1556 bpf_jit_probe_store_pre(jit, insn, &probe); 1557 /* stg %w0,off(%dst,%arena) */ 1558 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, 1559 probe.arena_reg, off); 1560 err = bpf_jit_probe_post(jit, fp, &probe); 1561 if (err < 0) 1562 return err; 1563 jit->seen |= SEEN_MEM; 1564 break; 1565 /* 1566 * BPF_ATOMIC 1567 */ 1568 case BPF_STX | BPF_ATOMIC | BPF_DW: 1569 case BPF_STX | BPF_ATOMIC | BPF_W: 1570 case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: 1571 case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: 1572 { 1573 bool is32 = BPF_SIZE(insn->code) == BPF_W; 1574 1575 /* 1576 * Unlike loads and stores, atomics have only a base register, 1577 * but no index register. For the non-arena case, simply use 1578 * %dst as a base. For the arena case, use the work register 1579 * %r1: first, load the arena base into it, and then add %dst 1580 * to it. 1581 */ 1582 probe.arena_reg = dst_reg; 1583 1584 switch (insn->imm) { 1585 #define EMIT_ATOMIC(op32, op64) do { \ 1586 bpf_jit_probe_atomic_pre(jit, insn, &probe); \ 1587 /* {op32|op64} {%w0|%src},%src,off(%arena) */ \ 1588 EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \ 1589 (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \ 1590 src_reg, probe.arena_reg, off); \ 1591 err = bpf_jit_probe_post(jit, fp, &probe); \ 1592 if (err < 0) \ 1593 return err; \ 1594 if (insn->imm & BPF_FETCH) { \ 1595 /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \ 1596 _EMIT2(0x07e0); \ 1597 if (is32) \ 1598 EMIT_ZERO(src_reg); \ 1599 } \ 1600 } while (0) 1601 case BPF_ADD: 1602 case BPF_ADD | BPF_FETCH: 1603 /* {laal|laalg} */ 1604 EMIT_ATOMIC(0x00fa, 0x00ea); 1605 break; 1606 case BPF_AND: 1607 case BPF_AND | BPF_FETCH: 1608 /* {lan|lang} */ 1609 EMIT_ATOMIC(0x00f4, 0x00e4); 1610 break; 1611 case BPF_OR: 1612 case BPF_OR | BPF_FETCH: 1613 /* {lao|laog} */ 1614 EMIT_ATOMIC(0x00f6, 0x00e6); 1615 break; 1616 case BPF_XOR: 1617 case BPF_XOR | BPF_FETCH: 1618 /* {lax|laxg} */ 1619 EMIT_ATOMIC(0x00f7, 0x00e7); 1620 break; 1621 #undef EMIT_ATOMIC 1622 case BPF_XCHG: { 1623 struct bpf_jit_probe load_probe = probe; 1624 int loop_start; 1625 1626 bpf_jit_probe_atomic_pre(jit, insn, &load_probe); 1627 /* {ly|lg} %w0,off(%arena) */ 1628 EMIT6_DISP_LH(0xe3000000, 1629 is32 ? 0x0058 : 0x0004, REG_W0, REG_0, 1630 load_probe.arena_reg, off); 1631 bpf_jit_probe_emit_nop(jit, &load_probe); 1632 /* Reuse {ly|lg}'s arena_reg for {csy|csg}. */ 1633 if (load_probe.prg != -1) { 1634 probe.prg = jit->prg; 1635 probe.arena_reg = load_probe.arena_reg; 1636 } 1637 loop_start = jit->prg; 1638 /* 0: {csy|csg} %w0,%src,off(%arena) */ 1639 EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030, 1640 REG_W0, src_reg, probe.arena_reg, off); 1641 bpf_jit_probe_emit_nop(jit, &probe); 1642 /* brc 4,0b */ 1643 EMIT4_PCREL_RIC(0xa7040000, 4, loop_start); 1644 /* {llgfr|lgr} %src,%w0 */ 1645 EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0); 1646 /* Both probes should land here on exception. */ 1647 err = bpf_jit_probe_post(jit, fp, &load_probe); 1648 if (err < 0) 1649 return err; 1650 err = bpf_jit_probe_post(jit, fp, &probe); 1651 if (err < 0) 1652 return err; 1653 if (is32 && insn_is_zext(&insn[1])) 1654 insn_count = 2; 1655 break; 1656 } 1657 case BPF_CMPXCHG: 1658 bpf_jit_probe_atomic_pre(jit, insn, &probe); 1659 /* 0: {csy|csg} %b0,%src,off(%arena) */ 1660 EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030, 1661 BPF_REG_0, src_reg, 1662 probe.arena_reg, off); 1663 err = bpf_jit_probe_post(jit, fp, &probe); 1664 if (err < 0) 1665 return err; 1666 break; 1667 default: 1668 pr_err("Unknown atomic operation %02x\n", insn->imm); 1669 return -1; 1670 } 1671 1672 jit->seen |= SEEN_MEM; 1673 break; 1674 } 1675 /* 1676 * BPF_LDX 1677 */ 1678 case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */ 1679 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1680 case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: 1681 bpf_jit_probe_load_pre(jit, insn, &probe); 1682 /* llgc %dst,off(%src,%arena) */ 1683 EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, 1684 probe.arena_reg, off); 1685 err = bpf_jit_probe_post(jit, fp, &probe); 1686 if (err < 0) 1687 return err; 1688 jit->seen |= SEEN_MEM; 1689 if (insn_is_zext(&insn[1])) 1690 insn_count = 2; 1691 break; 1692 case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */ 1693 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: 1694 bpf_jit_probe_load_pre(jit, insn, &probe); 1695 /* lgb %dst,off(%src) */ 1696 EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off); 1697 err = bpf_jit_probe_post(jit, fp, &probe); 1698 if (err < 0) 1699 return err; 1700 jit->seen |= SEEN_MEM; 1701 break; 1702 case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ 1703 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1704 case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: 1705 bpf_jit_probe_load_pre(jit, insn, &probe); 1706 /* llgh %dst,off(%src,%arena) */ 1707 EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, 1708 probe.arena_reg, off); 1709 err = bpf_jit_probe_post(jit, fp, &probe); 1710 if (err < 0) 1711 return err; 1712 jit->seen |= SEEN_MEM; 1713 if (insn_is_zext(&insn[1])) 1714 insn_count = 2; 1715 break; 1716 case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */ 1717 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: 1718 bpf_jit_probe_load_pre(jit, insn, &probe); 1719 /* lgh %dst,off(%src) */ 1720 EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off); 1721 err = bpf_jit_probe_post(jit, fp, &probe); 1722 if (err < 0) 1723 return err; 1724 jit->seen |= SEEN_MEM; 1725 break; 1726 case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ 1727 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1728 case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: 1729 bpf_jit_probe_load_pre(jit, insn, &probe); 1730 /* llgf %dst,off(%src) */ 1731 jit->seen |= SEEN_MEM; 1732 EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, 1733 probe.arena_reg, off); 1734 err = bpf_jit_probe_post(jit, fp, &probe); 1735 if (err < 0) 1736 return err; 1737 if (insn_is_zext(&insn[1])) 1738 insn_count = 2; 1739 break; 1740 case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */ 1741 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: 1742 bpf_jit_probe_load_pre(jit, insn, &probe); 1743 /* lgf %dst,off(%src) */ 1744 jit->seen |= SEEN_MEM; 1745 EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off); 1746 err = bpf_jit_probe_post(jit, fp, &probe); 1747 if (err < 0) 1748 return err; 1749 break; 1750 case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ 1751 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1752 case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: 1753 bpf_jit_probe_load_pre(jit, insn, &probe); 1754 /* lg %dst,off(%src,%arena) */ 1755 jit->seen |= SEEN_MEM; 1756 EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, 1757 probe.arena_reg, off); 1758 err = bpf_jit_probe_post(jit, fp, &probe); 1759 if (err < 0) 1760 return err; 1761 break; 1762 /* 1763 * BPF_JMP / CALL 1764 */ 1765 case BPF_JMP | BPF_CALL: 1766 { 1767 const struct btf_func_model *m; 1768 bool func_addr_fixed; 1769 int j, ret; 1770 u64 func; 1771 1772 ret = bpf_jit_get_func_addr(fp, insn, extra_pass, 1773 &func, &func_addr_fixed); 1774 if (ret < 0) 1775 return -1; 1776 1777 REG_SET_SEEN(BPF_REG_5); 1778 jit->seen |= SEEN_FUNC; 1779 /* 1780 * Copy the tail call counter to where the callee expects it. 1781 * 1782 * Note 1: The callee can increment the tail call counter, but 1783 * we do not load it back, since the x86 JIT does not do this 1784 * either. 1785 * 1786 * Note 2: We assume that the verifier does not let us call the 1787 * main program, which clears the tail call counter on entry. 1788 */ 1789 /* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */ 1790 _EMIT6(0xd203f000 | STK_OFF_TCCNT, 1791 0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth)); 1792 1793 /* Sign-extend the kfunc arguments. */ 1794 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 1795 m = bpf_jit_find_kfunc_model(fp, insn); 1796 if (!m) 1797 return -1; 1798 1799 for (j = 0; j < m->nr_args; j++) { 1800 if (sign_extend(jit, BPF_REG_1 + j, 1801 m->arg_size[j], 1802 m->arg_flags[j])) 1803 return -1; 1804 } 1805 } 1806 1807 /* lgrl %w1,func */ 1808 EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func)); 1809 /* %r1() */ 1810 call_r1(jit); 1811 /* lgr %b0,%r2: load return value into %b0 */ 1812 EMIT4(0xb9040000, BPF_REG_0, REG_2); 1813 break; 1814 } 1815 case BPF_JMP | BPF_TAIL_CALL: { 1816 int patch_1_clrj, patch_2_clij, patch_3_brc; 1817 1818 /* 1819 * Implicit input: 1820 * B1: pointer to ctx 1821 * B2: pointer to bpf_array 1822 * B3: index in bpf_array 1823 * 1824 * if (index >= array->map.max_entries) 1825 * goto out; 1826 */ 1827 1828 /* llgf %w1,map.max_entries(%b2) */ 1829 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, 1830 offsetof(struct bpf_array, map.max_entries)); 1831 /* if ((u32)%b3 >= (u32)%w1) goto out; */ 1832 /* clrj %b3,%w1,0xa,out */ 1833 patch_1_clrj = jit->prg; 1834 EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa, 1835 jit->prg); 1836 1837 /* 1838 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 1839 * goto out; 1840 */ 1841 1842 if (jit->seen & SEEN_STACK) 1843 off = STK_OFF_TCCNT + STK_OFF + stack_depth; 1844 else 1845 off = STK_OFF_TCCNT; 1846 /* lhi %w0,1 */ 1847 EMIT4_IMM(0xa7080000, REG_W0, 1); 1848 /* laal %w1,%w0,off(%r15) */ 1849 EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off); 1850 /* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */ 1851 patch_2_clij = jit->prg; 1852 EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1, 1853 2, jit->prg); 1854 1855 /* 1856 * prog = array->ptrs[index]; 1857 * if (prog == NULL) 1858 * goto out; 1859 */ 1860 1861 /* llgfr %r1,%b3: %r1 = (u32) index */ 1862 EMIT4(0xb9160000, REG_1, BPF_REG_3); 1863 /* sllg %r1,%r1,3: %r1 *= 8 */ 1864 EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3); 1865 /* ltg %r1,prog(%b2,%r1) */ 1866 EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2, 1867 REG_1, offsetof(struct bpf_array, ptrs)); 1868 /* brc 0x8,out */ 1869 patch_3_brc = jit->prg; 1870 EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg); 1871 1872 /* 1873 * Restore registers before calling function 1874 */ 1875 save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); 1876 1877 /* 1878 * goto *(prog->bpf_func + tail_call_start); 1879 */ 1880 1881 /* lg %r1,bpf_func(%r1) */ 1882 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0, 1883 offsetof(struct bpf_prog, bpf_func)); 1884 if (nospec_uses_trampoline()) { 1885 jit->seen |= SEEN_FUNC; 1886 /* aghi %r1,tail_call_start */ 1887 EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start); 1888 /* brcl 0xf,__s390_indirect_jump_r1 */ 1889 EMIT6_PCREL_RILC_PTR(0xc0040000, 0xf, 1890 __s390_indirect_jump_r1); 1891 } else { 1892 /* bc 0xf,tail_call_start(%r1) */ 1893 _EMIT4(0x47f01000 + jit->tail_call_start); 1894 } 1895 /* out: */ 1896 if (jit->prg_buf) { 1897 *(u16 *)(jit->prg_buf + patch_1_clrj + 2) = 1898 (jit->prg - patch_1_clrj) >> 1; 1899 *(u16 *)(jit->prg_buf + patch_2_clij + 2) = 1900 (jit->prg - patch_2_clij) >> 1; 1901 *(u16 *)(jit->prg_buf + patch_3_brc + 2) = 1902 (jit->prg - patch_3_brc) >> 1; 1903 } 1904 break; 1905 } 1906 case BPF_JMP | BPF_EXIT: /* return b0 */ 1907 last = (i == fp->len - 1) ? 1 : 0; 1908 if (last) 1909 break; 1910 if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip)) 1911 /* brc 0xf, <exit> */ 1912 EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip); 1913 else 1914 /* brcl 0xf, <exit> */ 1915 EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip); 1916 break; 1917 /* 1918 * Branch relative (number of skipped instructions) to offset on 1919 * condition. 1920 * 1921 * Condition code to mask mapping: 1922 * 1923 * CC | Description | Mask 1924 * ------------------------------ 1925 * 0 | Operands equal | 8 1926 * 1 | First operand low | 4 1927 * 2 | First operand high | 2 1928 * 3 | Unused | 1 1929 * 1930 * For s390x relative branches: ip = ip + off_bytes 1931 * For BPF relative branches: insn = insn + off_insns + 1 1932 * 1933 * For example for s390x with offset 0 we jump to the branch 1934 * instruction itself (loop) and for BPF with offset 0 we 1935 * branch to the instruction behind the branch. 1936 */ 1937 case BPF_JMP32 | BPF_JA: /* if (true) */ 1938 branch_oc_off = imm; 1939 fallthrough; 1940 case BPF_JMP | BPF_JA: /* if (true) */ 1941 mask = 0xf000; /* j */ 1942 goto branch_oc; 1943 case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */ 1944 case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */ 1945 mask = 0x2000; /* jh */ 1946 goto branch_ks; 1947 case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */ 1948 case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */ 1949 mask = 0x4000; /* jl */ 1950 goto branch_ks; 1951 case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */ 1952 case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */ 1953 mask = 0xa000; /* jhe */ 1954 goto branch_ks; 1955 case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */ 1956 case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */ 1957 mask = 0xc000; /* jle */ 1958 goto branch_ks; 1959 case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */ 1960 case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */ 1961 mask = 0x2000; /* jh */ 1962 goto branch_ku; 1963 case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */ 1964 case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */ 1965 mask = 0x4000; /* jl */ 1966 goto branch_ku; 1967 case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */ 1968 case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */ 1969 mask = 0xa000; /* jhe */ 1970 goto branch_ku; 1971 case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */ 1972 case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */ 1973 mask = 0xc000; /* jle */ 1974 goto branch_ku; 1975 case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */ 1976 case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */ 1977 mask = 0x7000; /* jne */ 1978 goto branch_ku; 1979 case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */ 1980 case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */ 1981 mask = 0x8000; /* je */ 1982 goto branch_ku; 1983 case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */ 1984 case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */ 1985 mask = 0x7000; /* jnz */ 1986 if (BPF_CLASS(insn->code) == BPF_JMP32) { 1987 /* llilf %w1,imm (load zero extend imm) */ 1988 EMIT6_IMM(0xc00f0000, REG_W1, imm); 1989 /* nr %w1,%dst */ 1990 EMIT2(0x1400, REG_W1, dst_reg); 1991 } else { 1992 /* lgfi %w1,imm (load sign extend imm) */ 1993 EMIT6_IMM(0xc0010000, REG_W1, imm); 1994 /* ngr %w1,%dst */ 1995 EMIT4(0xb9800000, REG_W1, dst_reg); 1996 } 1997 goto branch_oc; 1998 1999 case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */ 2000 case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */ 2001 mask = 0x2000; /* jh */ 2002 goto branch_xs; 2003 case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */ 2004 case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */ 2005 mask = 0x4000; /* jl */ 2006 goto branch_xs; 2007 case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */ 2008 case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */ 2009 mask = 0xa000; /* jhe */ 2010 goto branch_xs; 2011 case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */ 2012 case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */ 2013 mask = 0xc000; /* jle */ 2014 goto branch_xs; 2015 case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */ 2016 case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */ 2017 mask = 0x2000; /* jh */ 2018 goto branch_xu; 2019 case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */ 2020 case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */ 2021 mask = 0x4000; /* jl */ 2022 goto branch_xu; 2023 case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */ 2024 case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */ 2025 mask = 0xa000; /* jhe */ 2026 goto branch_xu; 2027 case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */ 2028 case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */ 2029 mask = 0xc000; /* jle */ 2030 goto branch_xu; 2031 case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */ 2032 case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */ 2033 mask = 0x7000; /* jne */ 2034 goto branch_xu; 2035 case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */ 2036 case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */ 2037 mask = 0x8000; /* je */ 2038 goto branch_xu; 2039 case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */ 2040 case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */ 2041 { 2042 bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 2043 2044 mask = 0x7000; /* jnz */ 2045 /* nrk or ngrk %w1,%dst,%src */ 2046 EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000), 2047 REG_W1, dst_reg, src_reg); 2048 goto branch_oc; 2049 branch_ks: 2050 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 2051 /* cfi or cgfi %dst,imm */ 2052 EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000, 2053 dst_reg, imm); 2054 if (!is_first_pass(jit) && 2055 can_use_rel(jit, addrs[i + off + 1])) { 2056 /* brc mask,off */ 2057 EMIT4_PCREL_RIC(0xa7040000, 2058 mask >> 12, addrs[i + off + 1]); 2059 } else { 2060 /* brcl mask,off */ 2061 EMIT6_PCREL_RILC(0xc0040000, 2062 mask >> 12, addrs[i + off + 1]); 2063 } 2064 break; 2065 branch_ku: 2066 /* lgfi %w1,imm (load sign extend imm) */ 2067 src_reg = REG_1; 2068 EMIT6_IMM(0xc0010000, src_reg, imm); 2069 goto branch_xu; 2070 branch_xs: 2071 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 2072 if (!is_first_pass(jit) && 2073 can_use_rel(jit, addrs[i + off + 1])) { 2074 /* crj or cgrj %dst,%src,mask,off */ 2075 EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), 2076 dst_reg, src_reg, i, off, mask); 2077 } else { 2078 /* cr or cgr %dst,%src */ 2079 if (is_jmp32) 2080 EMIT2(0x1900, dst_reg, src_reg); 2081 else 2082 EMIT4(0xb9200000, dst_reg, src_reg); 2083 /* brcl mask,off */ 2084 EMIT6_PCREL_RILC(0xc0040000, 2085 mask >> 12, addrs[i + off + 1]); 2086 } 2087 break; 2088 branch_xu: 2089 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 2090 if (!is_first_pass(jit) && 2091 can_use_rel(jit, addrs[i + off + 1])) { 2092 /* clrj or clgrj %dst,%src,mask,off */ 2093 EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), 2094 dst_reg, src_reg, i, off, mask); 2095 } else { 2096 /* clr or clgr %dst,%src */ 2097 if (is_jmp32) 2098 EMIT2(0x1500, dst_reg, src_reg); 2099 else 2100 EMIT4(0xb9210000, dst_reg, src_reg); 2101 /* brcl mask,off */ 2102 EMIT6_PCREL_RILC(0xc0040000, 2103 mask >> 12, addrs[i + off + 1]); 2104 } 2105 break; 2106 branch_oc: 2107 if (!is_first_pass(jit) && 2108 can_use_rel(jit, addrs[i + branch_oc_off + 1])) { 2109 /* brc mask,off */ 2110 EMIT4_PCREL_RIC(0xa7040000, 2111 mask >> 12, 2112 addrs[i + branch_oc_off + 1]); 2113 } else { 2114 /* brcl mask,off */ 2115 EMIT6_PCREL_RILC(0xc0040000, 2116 mask >> 12, 2117 addrs[i + branch_oc_off + 1]); 2118 } 2119 break; 2120 } 2121 default: /* too complex, give up */ 2122 pr_err("Unknown opcode %02x\n", insn->code); 2123 return -1; 2124 } 2125 2126 return insn_count; 2127 } 2128 2129 /* 2130 * Return whether new i-th instruction address does not violate any invariant 2131 */ 2132 static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i) 2133 { 2134 /* On the first pass anything goes */ 2135 if (is_first_pass(jit)) 2136 return true; 2137 2138 /* The codegen pass must not change anything */ 2139 if (is_codegen_pass(jit)) 2140 return jit->addrs[i] == jit->prg; 2141 2142 /* Passes in between must not increase code size */ 2143 return jit->addrs[i] >= jit->prg; 2144 } 2145 2146 /* 2147 * Update the address of i-th instruction 2148 */ 2149 static int bpf_set_addr(struct bpf_jit *jit, int i) 2150 { 2151 int delta; 2152 2153 if (is_codegen_pass(jit)) { 2154 delta = jit->prg - jit->addrs[i]; 2155 if (delta < 0) 2156 bpf_skip(jit, -delta); 2157 } 2158 if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i))) 2159 return -1; 2160 jit->addrs[i] = jit->prg; 2161 return 0; 2162 } 2163 2164 /* 2165 * Compile eBPF program into s390x code 2166 */ 2167 static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, 2168 bool extra_pass, u32 stack_depth) 2169 { 2170 int i, insn_count, lit32_size, lit64_size; 2171 u64 kern_arena; 2172 2173 jit->lit32 = jit->lit32_start; 2174 jit->lit64 = jit->lit64_start; 2175 jit->prg = 0; 2176 jit->excnt = 0; 2177 2178 kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena); 2179 if (kern_arena) 2180 jit->kern_arena = _EMIT_CONST_U64(kern_arena); 2181 jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena); 2182 2183 bpf_jit_prologue(jit, fp, stack_depth); 2184 if (bpf_set_addr(jit, 0) < 0) 2185 return -1; 2186 for (i = 0; i < fp->len; i += insn_count) { 2187 insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth); 2188 if (insn_count < 0) 2189 return -1; 2190 /* Next instruction address */ 2191 if (bpf_set_addr(jit, i + insn_count) < 0) 2192 return -1; 2193 } 2194 bpf_jit_epilogue(jit, stack_depth); 2195 2196 lit32_size = jit->lit32 - jit->lit32_start; 2197 lit64_size = jit->lit64 - jit->lit64_start; 2198 jit->lit32_start = jit->prg; 2199 if (lit32_size) 2200 jit->lit32_start = ALIGN(jit->lit32_start, 4); 2201 jit->lit64_start = jit->lit32_start + lit32_size; 2202 if (lit64_size) 2203 jit->lit64_start = ALIGN(jit->lit64_start, 8); 2204 jit->size = jit->lit64_start + lit64_size; 2205 jit->size_prg = jit->prg; 2206 2207 if (WARN_ON_ONCE(fp->aux->extable && 2208 jit->excnt != fp->aux->num_exentries)) 2209 /* Verifier bug - too many entries. */ 2210 return -1; 2211 2212 return 0; 2213 } 2214 2215 bool bpf_jit_needs_zext(void) 2216 { 2217 return true; 2218 } 2219 2220 struct s390_jit_data { 2221 struct bpf_binary_header *header; 2222 struct bpf_jit ctx; 2223 int pass; 2224 }; 2225 2226 static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit, 2227 struct bpf_prog *fp) 2228 { 2229 struct bpf_binary_header *header; 2230 struct bpf_insn *insn; 2231 u32 extable_size; 2232 u32 code_size; 2233 int i; 2234 2235 for (i = 0; i < fp->len; i++) { 2236 insn = &fp->insnsi[i]; 2237 2238 if (BPF_CLASS(insn->code) == BPF_STX && 2239 BPF_MODE(insn->code) == BPF_PROBE_ATOMIC && 2240 (BPF_SIZE(insn->code) == BPF_DW || 2241 BPF_SIZE(insn->code) == BPF_W) && 2242 insn->imm == BPF_XCHG) 2243 /* 2244 * bpf_jit_insn() emits a load and a compare-and-swap, 2245 * both of which need to be probed. 2246 */ 2247 fp->aux->num_exentries += 1; 2248 } 2249 /* We need two entries per insn. */ 2250 fp->aux->num_exentries *= 2; 2251 2252 code_size = roundup(jit->size, 2253 __alignof__(struct exception_table_entry)); 2254 extable_size = fp->aux->num_exentries * 2255 sizeof(struct exception_table_entry); 2256 header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf, 2257 8, jit_fill_hole); 2258 if (!header) 2259 return NULL; 2260 fp->aux->extable = (struct exception_table_entry *) 2261 (jit->prg_buf + code_size); 2262 return header; 2263 } 2264 2265 /* 2266 * Compile eBPF program "fp" 2267 */ 2268 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) 2269 { 2270 u32 stack_depth = round_up(fp->aux->stack_depth, 8); 2271 struct bpf_prog *tmp, *orig_fp = fp; 2272 struct bpf_binary_header *header; 2273 struct s390_jit_data *jit_data; 2274 bool tmp_blinded = false; 2275 bool extra_pass = false; 2276 struct bpf_jit jit; 2277 int pass; 2278 2279 if (!fp->jit_requested) 2280 return orig_fp; 2281 2282 tmp = bpf_jit_blind_constants(fp); 2283 /* 2284 * If blinding was requested and we failed during blinding, 2285 * we must fall back to the interpreter. 2286 */ 2287 if (IS_ERR(tmp)) 2288 return orig_fp; 2289 if (tmp != fp) { 2290 tmp_blinded = true; 2291 fp = tmp; 2292 } 2293 2294 jit_data = fp->aux->jit_data; 2295 if (!jit_data) { 2296 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 2297 if (!jit_data) { 2298 fp = orig_fp; 2299 goto out; 2300 } 2301 fp->aux->jit_data = jit_data; 2302 } 2303 if (jit_data->ctx.addrs) { 2304 jit = jit_data->ctx; 2305 header = jit_data->header; 2306 extra_pass = true; 2307 pass = jit_data->pass + 1; 2308 goto skip_init_ctx; 2309 } 2310 2311 memset(&jit, 0, sizeof(jit)); 2312 jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); 2313 if (jit.addrs == NULL) { 2314 fp = orig_fp; 2315 goto free_addrs; 2316 } 2317 /* 2318 * Three initial passes: 2319 * - 1/2: Determine clobbered registers 2320 * - 3: Calculate program size and addrs array 2321 */ 2322 for (pass = 1; pass <= 3; pass++) { 2323 if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) { 2324 fp = orig_fp; 2325 goto free_addrs; 2326 } 2327 } 2328 /* 2329 * Final pass: Allocate and generate program 2330 */ 2331 header = bpf_jit_alloc(&jit, fp); 2332 if (!header) { 2333 fp = orig_fp; 2334 goto free_addrs; 2335 } 2336 skip_init_ctx: 2337 if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) { 2338 bpf_jit_binary_free(header); 2339 fp = orig_fp; 2340 goto free_addrs; 2341 } 2342 if (bpf_jit_enable > 1) { 2343 bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf); 2344 print_fn_code(jit.prg_buf, jit.size_prg); 2345 } 2346 if (!fp->is_func || extra_pass) { 2347 if (bpf_jit_binary_lock_ro(header)) { 2348 bpf_jit_binary_free(header); 2349 fp = orig_fp; 2350 goto free_addrs; 2351 } 2352 } else { 2353 jit_data->header = header; 2354 jit_data->ctx = jit; 2355 jit_data->pass = pass; 2356 } 2357 fp->bpf_func = (void *) jit.prg_buf; 2358 fp->jited = 1; 2359 fp->jited_len = jit.size; 2360 2361 if (!fp->is_func || extra_pass) { 2362 bpf_prog_fill_jited_linfo(fp, jit.addrs + 1); 2363 free_addrs: 2364 kvfree(jit.addrs); 2365 kfree(jit_data); 2366 fp->aux->jit_data = NULL; 2367 } 2368 out: 2369 if (tmp_blinded) 2370 bpf_jit_prog_release_other(fp, fp == orig_fp ? 2371 tmp : orig_fp); 2372 return fp; 2373 } 2374 2375 bool bpf_jit_supports_kfunc_call(void) 2376 { 2377 return true; 2378 } 2379 2380 bool bpf_jit_supports_far_kfunc_call(void) 2381 { 2382 return true; 2383 } 2384 2385 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 2386 void *old_addr, void *new_addr) 2387 { 2388 struct bpf_plt expected_plt, current_plt, new_plt, *plt; 2389 struct { 2390 u16 opc; 2391 s32 disp; 2392 } __packed insn; 2393 char *ret; 2394 int err; 2395 2396 /* Verify the branch to be patched. */ 2397 err = copy_from_kernel_nofault(&insn, ip, sizeof(insn)); 2398 if (err < 0) 2399 return err; 2400 if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0))) 2401 return -EINVAL; 2402 2403 if (t == BPF_MOD_JUMP && 2404 insn.disp == ((char *)new_addr - (char *)ip) >> 1) { 2405 /* 2406 * The branch already points to the destination, 2407 * there is no PLT. 2408 */ 2409 } else { 2410 /* Verify the PLT. */ 2411 plt = ip + (insn.disp << 1); 2412 err = copy_from_kernel_nofault(¤t_plt, plt, 2413 sizeof(current_plt)); 2414 if (err < 0) 2415 return err; 2416 ret = (char *)ip + 6; 2417 bpf_jit_plt(&expected_plt, ret, old_addr); 2418 if (memcmp(¤t_plt, &expected_plt, sizeof(current_plt))) 2419 return -EINVAL; 2420 /* Adjust the call address. */ 2421 bpf_jit_plt(&new_plt, ret, new_addr); 2422 s390_kernel_write(&plt->target, &new_plt.target, 2423 sizeof(void *)); 2424 } 2425 2426 /* Adjust the mask of the branch. */ 2427 insn.opc = 0xc004 | (new_addr ? 0xf0 : 0); 2428 s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1); 2429 2430 /* Make the new code visible to the other CPUs. */ 2431 text_poke_sync_lock(); 2432 2433 return 0; 2434 } 2435 2436 struct bpf_tramp_jit { 2437 struct bpf_jit common; 2438 int orig_stack_args_off;/* Offset of arguments placed on stack by the 2439 * func_addr's original caller 2440 */ 2441 int stack_size; /* Trampoline stack size */ 2442 int backchain_off; /* Offset of backchain */ 2443 int stack_args_off; /* Offset of stack arguments for calling 2444 * func_addr, has to be at the top 2445 */ 2446 int reg_args_off; /* Offset of register arguments for calling 2447 * func_addr 2448 */ 2449 int ip_off; /* For bpf_get_func_ip(), has to be at 2450 * (ctx - 16) 2451 */ 2452 int arg_cnt_off; /* For bpf_get_func_arg_cnt(), has to be at 2453 * (ctx - 8) 2454 */ 2455 int bpf_args_off; /* Offset of BPF_PROG context, which consists 2456 * of BPF arguments followed by return value 2457 */ 2458 int retval_off; /* Offset of return value (see above) */ 2459 int r7_r8_off; /* Offset of saved %r7 and %r8, which are used 2460 * for __bpf_prog_enter() return value and 2461 * func_addr respectively 2462 */ 2463 int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */ 2464 int tccnt_off; /* Offset of saved tailcall counter */ 2465 int r14_off; /* Offset of saved %r14, has to be at the 2466 * bottom */ 2467 int do_fexit; /* do_fexit: label */ 2468 }; 2469 2470 static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val) 2471 { 2472 /* llihf %dst_reg,val_hi */ 2473 EMIT6_IMM(0xc00e0000, dst_reg, (val >> 32)); 2474 /* oilf %rdst_reg,val_lo */ 2475 EMIT6_IMM(0xc00d0000, dst_reg, val); 2476 } 2477 2478 static int invoke_bpf_prog(struct bpf_tramp_jit *tjit, 2479 const struct btf_func_model *m, 2480 struct bpf_tramp_link *tlink, bool save_ret) 2481 { 2482 struct bpf_jit *jit = &tjit->common; 2483 int cookie_off = tjit->run_ctx_off + 2484 offsetof(struct bpf_tramp_run_ctx, bpf_cookie); 2485 struct bpf_prog *p = tlink->link.prog; 2486 int patch; 2487 2488 /* 2489 * run_ctx.cookie = tlink->cookie; 2490 */ 2491 2492 /* %r0 = tlink->cookie */ 2493 load_imm64(jit, REG_W0, tlink->cookie); 2494 /* stg %r0,cookie_off(%r15) */ 2495 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off); 2496 2497 /* 2498 * if ((start = __bpf_prog_enter(p, &run_ctx)) == 0) 2499 * goto skip; 2500 */ 2501 2502 /* %r1 = __bpf_prog_enter */ 2503 load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p)); 2504 /* %r2 = p */ 2505 load_imm64(jit, REG_2, (u64)p); 2506 /* la %r3,run_ctx_off(%r15) */ 2507 EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off); 2508 /* %r1() */ 2509 call_r1(jit); 2510 /* ltgr %r7,%r2 */ 2511 EMIT4(0xb9020000, REG_7, REG_2); 2512 /* brcl 8,skip */ 2513 patch = jit->prg; 2514 EMIT6_PCREL_RILC(0xc0040000, 8, 0); 2515 2516 /* 2517 * retval = bpf_func(args, p->insnsi); 2518 */ 2519 2520 /* %r1 = p->bpf_func */ 2521 load_imm64(jit, REG_1, (u64)p->bpf_func); 2522 /* la %r2,bpf_args_off(%r15) */ 2523 EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off); 2524 /* %r3 = p->insnsi */ 2525 if (!p->jited) 2526 load_imm64(jit, REG_3, (u64)p->insnsi); 2527 /* %r1() */ 2528 call_r1(jit); 2529 /* stg %r2,retval_off(%r15) */ 2530 if (save_ret) { 2531 if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags)) 2532 return -1; 2533 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15, 2534 tjit->retval_off); 2535 } 2536 2537 /* skip: */ 2538 if (jit->prg_buf) 2539 *(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1; 2540 2541 /* 2542 * __bpf_prog_exit(p, start, &run_ctx); 2543 */ 2544 2545 /* %r1 = __bpf_prog_exit */ 2546 load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p)); 2547 /* %r2 = p */ 2548 load_imm64(jit, REG_2, (u64)p); 2549 /* lgr %r3,%r7 */ 2550 EMIT4(0xb9040000, REG_3, REG_7); 2551 /* la %r4,run_ctx_off(%r15) */ 2552 EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off); 2553 /* %r1() */ 2554 call_r1(jit); 2555 2556 return 0; 2557 } 2558 2559 static int alloc_stack(struct bpf_tramp_jit *tjit, size_t size) 2560 { 2561 int stack_offset = tjit->stack_size; 2562 2563 tjit->stack_size += size; 2564 return stack_offset; 2565 } 2566 2567 /* ABI uses %r2 - %r6 for parameter passing. */ 2568 #define MAX_NR_REG_ARGS 5 2569 2570 /* The "L" field of the "mvc" instruction is 8 bits. */ 2571 #define MAX_MVC_SIZE 256 2572 #define MAX_NR_STACK_ARGS (MAX_MVC_SIZE / sizeof(u64)) 2573 2574 /* -mfentry generates a 6-byte nop on s390x. */ 2575 #define S390X_PATCH_SIZE 6 2576 2577 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, 2578 struct bpf_tramp_jit *tjit, 2579 const struct btf_func_model *m, 2580 u32 flags, 2581 struct bpf_tramp_links *tlinks, 2582 void *func_addr) 2583 { 2584 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 2585 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; 2586 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 2587 int nr_bpf_args, nr_reg_args, nr_stack_args; 2588 struct bpf_jit *jit = &tjit->common; 2589 int arg, bpf_arg_off; 2590 int i, j; 2591 2592 /* Support as many stack arguments as "mvc" instruction can handle. */ 2593 nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS); 2594 nr_stack_args = m->nr_args - nr_reg_args; 2595 if (nr_stack_args > MAX_NR_STACK_ARGS) 2596 return -ENOTSUPP; 2597 2598 /* Return to %r14 in the struct_ops case. */ 2599 if (flags & BPF_TRAMP_F_INDIRECT) 2600 flags |= BPF_TRAMP_F_SKIP_FRAME; 2601 2602 /* 2603 * Compute how many arguments we need to pass to BPF programs. 2604 * BPF ABI mirrors that of x86_64: arguments that are 16 bytes or 2605 * smaller are packed into 1 or 2 registers; larger arguments are 2606 * passed via pointers. 2607 * In s390x ABI, arguments that are 8 bytes or smaller are packed into 2608 * a register; larger arguments are passed via pointers. 2609 * We need to deal with this difference. 2610 */ 2611 nr_bpf_args = 0; 2612 for (i = 0; i < m->nr_args; i++) { 2613 if (m->arg_size[i] <= 8) 2614 nr_bpf_args += 1; 2615 else if (m->arg_size[i] <= 16) 2616 nr_bpf_args += 2; 2617 else 2618 return -ENOTSUPP; 2619 } 2620 2621 /* 2622 * Calculate the stack layout. 2623 */ 2624 2625 /* 2626 * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x 2627 * ABI requires, put our backchain at the end of the allocated memory. 2628 */ 2629 tjit->stack_size = STACK_FRAME_OVERHEAD; 2630 tjit->backchain_off = tjit->stack_size - sizeof(u64); 2631 tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64)); 2632 tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64)); 2633 tjit->ip_off = alloc_stack(tjit, sizeof(u64)); 2634 tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64)); 2635 tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64)); 2636 tjit->retval_off = alloc_stack(tjit, sizeof(u64)); 2637 tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64)); 2638 tjit->run_ctx_off = alloc_stack(tjit, 2639 sizeof(struct bpf_tramp_run_ctx)); 2640 tjit->tccnt_off = alloc_stack(tjit, sizeof(u64)); 2641 tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2); 2642 /* 2643 * In accordance with the s390x ABI, the caller has allocated 2644 * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's 2645 * backchain, and the rest we can use. 2646 */ 2647 tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64); 2648 tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD; 2649 2650 /* lgr %r1,%r15 */ 2651 EMIT4(0xb9040000, REG_1, REG_15); 2652 /* aghi %r15,-stack_size */ 2653 EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size); 2654 /* stg %r1,backchain_off(%r15) */ 2655 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15, 2656 tjit->backchain_off); 2657 /* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */ 2658 _EMIT6(0xd203f000 | tjit->tccnt_off, 2659 0xf000 | (tjit->stack_size + STK_OFF_TCCNT)); 2660 /* stmg %r2,%rN,fwd_reg_args_off(%r15) */ 2661 if (nr_reg_args) 2662 EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2, 2663 REG_2 + (nr_reg_args - 1), REG_15, 2664 tjit->reg_args_off); 2665 for (i = 0, j = 0; i < m->nr_args; i++) { 2666 if (i < MAX_NR_REG_ARGS) 2667 arg = REG_2 + i; 2668 else 2669 arg = tjit->orig_stack_args_off + 2670 (i - MAX_NR_REG_ARGS) * sizeof(u64); 2671 bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64); 2672 if (m->arg_size[i] <= 8) { 2673 if (i < MAX_NR_REG_ARGS) 2674 /* stg %arg,bpf_arg_off(%r15) */ 2675 EMIT6_DISP_LH(0xe3000000, 0x0024, arg, 2676 REG_0, REG_15, bpf_arg_off); 2677 else 2678 /* mvc bpf_arg_off(8,%r15),arg(%r15) */ 2679 _EMIT6(0xd207f000 | bpf_arg_off, 2680 0xf000 | arg); 2681 j += 1; 2682 } else { 2683 if (i < MAX_NR_REG_ARGS) { 2684 /* mvc bpf_arg_off(16,%r15),0(%arg) */ 2685 _EMIT6(0xd20ff000 | bpf_arg_off, 2686 reg2hex[arg] << 12); 2687 } else { 2688 /* lg %r1,arg(%r15) */ 2689 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_0, 2690 REG_15, arg); 2691 /* mvc bpf_arg_off(16,%r15),0(%r1) */ 2692 _EMIT6(0xd20ff000 | bpf_arg_off, 0x1000); 2693 } 2694 j += 2; 2695 } 2696 } 2697 /* stmg %r7,%r8,r7_r8_off(%r15) */ 2698 EMIT6_DISP_LH(0xeb000000, 0x0024, REG_7, REG_8, REG_15, 2699 tjit->r7_r8_off); 2700 /* stg %r14,r14_off(%r15) */ 2701 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off); 2702 2703 if (flags & BPF_TRAMP_F_ORIG_STACK) { 2704 /* 2705 * The ftrace trampoline puts the return address (which is the 2706 * address of the original function + S390X_PATCH_SIZE) into 2707 * %r0; see ftrace_shared_hotpatch_trampoline_br and 2708 * ftrace_init_nop() for details. 2709 */ 2710 2711 /* lgr %r8,%r0 */ 2712 EMIT4(0xb9040000, REG_8, REG_0); 2713 } else { 2714 /* %r8 = func_addr + S390X_PATCH_SIZE */ 2715 load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE); 2716 } 2717 2718 /* 2719 * ip = func_addr; 2720 * arg_cnt = m->nr_args; 2721 */ 2722 2723 if (flags & BPF_TRAMP_F_IP_ARG) { 2724 /* %r0 = func_addr */ 2725 load_imm64(jit, REG_0, (u64)func_addr); 2726 /* stg %r0,ip_off(%r15) */ 2727 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15, 2728 tjit->ip_off); 2729 } 2730 /* lghi %r0,nr_bpf_args */ 2731 EMIT4_IMM(0xa7090000, REG_0, nr_bpf_args); 2732 /* stg %r0,arg_cnt_off(%r15) */ 2733 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15, 2734 tjit->arg_cnt_off); 2735 2736 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2737 /* 2738 * __bpf_tramp_enter(im); 2739 */ 2740 2741 /* %r1 = __bpf_tramp_enter */ 2742 load_imm64(jit, REG_1, (u64)__bpf_tramp_enter); 2743 /* %r2 = im */ 2744 load_imm64(jit, REG_2, (u64)im); 2745 /* %r1() */ 2746 call_r1(jit); 2747 } 2748 2749 for (i = 0; i < fentry->nr_links; i++) 2750 if (invoke_bpf_prog(tjit, m, fentry->links[i], 2751 flags & BPF_TRAMP_F_RET_FENTRY_RET)) 2752 return -EINVAL; 2753 2754 if (fmod_ret->nr_links) { 2755 /* 2756 * retval = 0; 2757 */ 2758 2759 /* xc retval_off(8,%r15),retval_off(%r15) */ 2760 _EMIT6(0xd707f000 | tjit->retval_off, 2761 0xf000 | tjit->retval_off); 2762 2763 for (i = 0; i < fmod_ret->nr_links; i++) { 2764 if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true)) 2765 return -EINVAL; 2766 2767 /* 2768 * if (retval) 2769 * goto do_fexit; 2770 */ 2771 2772 /* ltg %r0,retval_off(%r15) */ 2773 EMIT6_DISP_LH(0xe3000000, 0x0002, REG_0, REG_0, REG_15, 2774 tjit->retval_off); 2775 /* brcl 7,do_fexit */ 2776 EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit); 2777 } 2778 } 2779 2780 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2781 /* 2782 * retval = func_addr(args); 2783 */ 2784 2785 /* lmg %r2,%rN,reg_args_off(%r15) */ 2786 if (nr_reg_args) 2787 EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2, 2788 REG_2 + (nr_reg_args - 1), REG_15, 2789 tjit->reg_args_off); 2790 /* mvc stack_args_off(N,%r15),orig_stack_args_off(%r15) */ 2791 if (nr_stack_args) 2792 _EMIT6(0xd200f000 | 2793 (nr_stack_args * sizeof(u64) - 1) << 16 | 2794 tjit->stack_args_off, 2795 0xf000 | tjit->orig_stack_args_off); 2796 /* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */ 2797 _EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off); 2798 /* lgr %r1,%r8 */ 2799 EMIT4(0xb9040000, REG_1, REG_8); 2800 /* %r1() */ 2801 call_r1(jit); 2802 /* stg %r2,retval_off(%r15) */ 2803 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15, 2804 tjit->retval_off); 2805 2806 im->ip_after_call = jit->prg_buf + jit->prg; 2807 2808 /* 2809 * The following nop will be patched by bpf_tramp_image_put(). 2810 */ 2811 2812 /* brcl 0,im->ip_epilogue */ 2813 EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue); 2814 } 2815 2816 /* do_fexit: */ 2817 tjit->do_fexit = jit->prg; 2818 for (i = 0; i < fexit->nr_links; i++) 2819 if (invoke_bpf_prog(tjit, m, fexit->links[i], false)) 2820 return -EINVAL; 2821 2822 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2823 im->ip_epilogue = jit->prg_buf + jit->prg; 2824 2825 /* 2826 * __bpf_tramp_exit(im); 2827 */ 2828 2829 /* %r1 = __bpf_tramp_exit */ 2830 load_imm64(jit, REG_1, (u64)__bpf_tramp_exit); 2831 /* %r2 = im */ 2832 load_imm64(jit, REG_2, (u64)im); 2833 /* %r1() */ 2834 call_r1(jit); 2835 } 2836 2837 /* lmg %r2,%rN,reg_args_off(%r15) */ 2838 if ((flags & BPF_TRAMP_F_RESTORE_REGS) && nr_reg_args) 2839 EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2, 2840 REG_2 + (nr_reg_args - 1), REG_15, 2841 tjit->reg_args_off); 2842 /* lgr %r1,%r8 */ 2843 if (!(flags & BPF_TRAMP_F_SKIP_FRAME)) 2844 EMIT4(0xb9040000, REG_1, REG_8); 2845 /* lmg %r7,%r8,r7_r8_off(%r15) */ 2846 EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15, 2847 tjit->r7_r8_off); 2848 /* lg %r14,r14_off(%r15) */ 2849 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off); 2850 /* lg %r2,retval_off(%r15) */ 2851 if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET)) 2852 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15, 2853 tjit->retval_off); 2854 /* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */ 2855 _EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT), 2856 0xf000 | tjit->tccnt_off); 2857 /* aghi %r15,stack_size */ 2858 EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size); 2859 if (flags & BPF_TRAMP_F_SKIP_FRAME) 2860 EMIT_JUMP_REG(14); 2861 else 2862 EMIT_JUMP_REG(1); 2863 2864 return 0; 2865 } 2866 2867 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, 2868 struct bpf_tramp_links *tlinks, void *orig_call) 2869 { 2870 struct bpf_tramp_image im; 2871 struct bpf_tramp_jit tjit; 2872 int ret; 2873 2874 memset(&tjit, 0, sizeof(tjit)); 2875 2876 ret = __arch_prepare_bpf_trampoline(&im, &tjit, m, flags, 2877 tlinks, orig_call); 2878 2879 return ret < 0 ? ret : tjit.common.prg; 2880 } 2881 2882 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, 2883 void *image_end, const struct btf_func_model *m, 2884 u32 flags, struct bpf_tramp_links *tlinks, 2885 void *func_addr) 2886 { 2887 struct bpf_tramp_jit tjit; 2888 int ret; 2889 2890 /* Compute offsets, check whether the code fits. */ 2891 memset(&tjit, 0, sizeof(tjit)); 2892 ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags, 2893 tlinks, func_addr); 2894 2895 if (ret < 0) 2896 return ret; 2897 if (tjit.common.prg > (char *)image_end - (char *)image) 2898 /* 2899 * Use the same error code as for exceeding 2900 * BPF_MAX_TRAMP_LINKS. 2901 */ 2902 return -E2BIG; 2903 2904 tjit.common.prg = 0; 2905 tjit.common.prg_buf = image; 2906 ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags, 2907 tlinks, func_addr); 2908 2909 return ret < 0 ? ret : tjit.common.prg; 2910 } 2911 2912 bool bpf_jit_supports_subprog_tailcalls(void) 2913 { 2914 return true; 2915 } 2916 2917 bool bpf_jit_supports_arena(void) 2918 { 2919 return true; 2920 } 2921 2922 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) 2923 { 2924 if (!in_arena) 2925 return true; 2926 switch (insn->code) { 2927 case BPF_STX | BPF_ATOMIC | BPF_B: 2928 case BPF_STX | BPF_ATOMIC | BPF_H: 2929 case BPF_STX | BPF_ATOMIC | BPF_W: 2930 case BPF_STX | BPF_ATOMIC | BPF_DW: 2931 if (bpf_atomic_is_load_store(insn)) 2932 return false; 2933 } 2934 return true; 2935 } 2936 2937 bool bpf_jit_supports_exceptions(void) 2938 { 2939 /* 2940 * Exceptions require unwinding support, which is always available, 2941 * because the kernel is always built with backchain. 2942 */ 2943 return true; 2944 } 2945 2946 void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64), 2947 void *cookie) 2948 { 2949 unsigned long addr, prev_addr = 0; 2950 struct unwind_state state; 2951 2952 unwind_for_each_frame(&state, NULL, NULL, 0) { 2953 addr = unwind_get_return_address(&state); 2954 if (!addr) 2955 break; 2956 /* 2957 * addr is a return address and state.sp is the value of %r15 2958 * at this address. exception_cb needs %r15 at entry to the 2959 * function containing addr, so take the next state.sp. 2960 * 2961 * There is no bp, and the exception_cb prog does not need one 2962 * to perform a quasi-longjmp. The common code requires a 2963 * non-zero bp, so pass sp there as well. 2964 */ 2965 if (prev_addr && !consume_fn(cookie, prev_addr, state.sp, 2966 state.sp)) 2967 break; 2968 prev_addr = addr; 2969 } 2970 } 2971