Excerpts from the s390 BPF JIT compiler (arch/s390/net/bpf_jit_comp.c), grouped by the function they appear in. Editor's example sketches are marked as such.
// SPDX-License-Identifier: GPL-2.0
/*
 * From the file header, the JIT's minimum build requirements:
 * - HAVE_MARCH_Z196_FEATURES: laal, laalg
 * - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
 * - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
 * - 64BIT
 */
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
/* in struct bpf_jit */
    int lit32_start;    /* Start of 32-bit literal pool */
    int lit32;          /* Current position in 32-bit literal pool */
    int lit64_start;    /* Start of 64-bit literal pool */
    int lit64;          /* Current position in 64-bit literal pool */

#define NVREGS 0xffc0   /* %r6-%r15 */
/* in reg_set_seen() */
    jit->seen_regs |= (1 << r1);
/* in _EMIT2() */
    if (jit->prg_buf)                                   \
        *(u16 *) (jit->prg_buf + jit->prg) = (op);      \
    jit->prg += 2;                                      \

/* in _EMIT4() */
    if (jit->prg_buf)                                   \
        *(u32 *) (jit->prg_buf + jit->prg) = (op);      \
    jit->prg += 4;                                      \
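/*
 * Editor's sketch (not kernel code): the _EMIT2/_EMIT4 macros above follow a
 * two-pass pattern. While prg_buf is NULL the emitter only advances prg to
 * measure the program; once a buffer exists it also stores the opcode bytes.
 * A minimal standalone model of that pattern:
 */
#include <stdint.h>
#include <string.h>

struct emit_ctx {
    uint8_t *prg_buf;   /* NULL during sizing passes */
    int prg;            /* current offset in bytes */
};

static void emit2_sketch(struct emit_ctx *ctx, uint16_t op)
{
    if (ctx->prg_buf)
        memcpy(ctx->prg_buf + ctx->prg, &op, 2);    /* kernel: *(u16 *) store */
    ctx->prg += 2;
}

static void emit4_sketch(struct emit_ctx *ctx, uint32_t op)
{
    if (ctx->prg_buf)
        memcpy(ctx->prg_buf + ctx->prg, &op, 4);    /* kernel: *(u32 *) store */
    ctx->prg += 4;
}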
/* in the PC-relative EMIT macros */
    int __rel = ((target) - jit->prg) / 2;              \

/* in _EMIT6() */
    if (jit->prg_buf) {                                 \
        *(u32 *) (jit->prg_buf + jit->prg) = (op1);     \
        *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \
    jit->prg += 6;                                      \

/* The same offset computation recurs in each EMIT6_PCREL_* variant: */
    unsigned int rel = (int)((target) - jit->prg) / 2;  \
    int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;  \
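/*
 * Editor's note: s390 instructions are 2, 4, or 6 bytes long; a 6-byte
 * instruction is stored as a 4-byte part plus a 2-byte part, which is why
 * _EMIT6() above does one u32 and one u16 store. PC-relative fields count
 * 2-byte halfwords, hence the "/ 2" in every rel computation. Sketch:
 */
static int rel_halfwords_sketch(int target_prg, int branch_prg)
{
    /* both arguments are byte offsets within the JITed image */
    return (target_prg - branch_prg) / 2;   /* halfword displacement */
}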
/* in _EMIT_CONST_U32() */
    ret = jit->lit32;                                   \
    if (jit->prg_buf)                                   \
        *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
    jit->lit32 += 4;                                    \

/* in EMIT_CONST_U32() */
    jit->seen |= SEEN_LITERAL;                          \
    _EMIT_CONST_U32(val) - jit->base_ip;                \

/* in _EMIT_CONST_U64() */
    ret = jit->lit64;                                   \
    if (jit->prg_buf)                                   \
        *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
    jit->lit64 += 8;                                    \

/* in EMIT_CONST_U64() */
    jit->seen |= SEEN_LITERAL;                          \
    _EMIT_CONST_U64(val) - jit->base_ip;                \
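/*
 * Editor's sketch (illustrative names, not the kernel API): constants too
 * wide for an immediate operand are appended to a literal pool and later
 * loaded via a displacement from base_ip, which is what the
 * EMIT_CONST_U32/U64 macros above compute. A standalone model:
 */
#include <stdint.h>
#include <string.h>

struct litpool_sketch {
    uint8_t *prg_buf;   /* NULL during sizing passes */
    int lit64;          /* current position in the 64-bit pool */
    int base_ip;        /* position the pool is addressed relative to */
};

/* Append a 64-bit constant; return its displacement from base_ip. */
static int emit_const_u64_sketch(struct litpool_sketch *ctx, uint64_t val)
{
    int pos = ctx->lit64;

    if (ctx->prg_buf)
        memcpy(ctx->prg_buf + pos, &val, 8);
    ctx->lit64 += 8;
    return pos - ctx->base_ip;  /* usable as e.g. an lg displacement */
}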
/* in EMIT_ZERO() */
    if (!fp->aux->verifier_zext) {                      \

/* in is_first_pass() */
    return jit->size == 0;

/* in is_codegen_pass() */
    return jit->prg_buf;
/*
 * Return whether "rel" can be encoded as a short PC-relative offset
 */
/* in is_valid_rel() */
    return rel >= -65536 && rel <= 65534;

/*
 * Return whether "off" can be reached using a short PC-relative offset
 */
/* in can_use_rel() */
    return is_valid_rel(off - jit->prg);

/*
 * Return whether "disp" can be encoded using the Long-Displacement Facility
 */
/* in is_valid_ldisp() */
    return disp >= -524288 && disp <= 524287;

/*
 * Return whether the next 32-bit literal pool entry can be referenced using
 * the Long-Displacement Facility
 */
/* in can_use_ldisp_for_lit32() */
    return is_valid_ldisp(jit->lit32 - jit->base_ip);

/*
 * Return whether the next 64-bit literal pool entry can be referenced using
 * the Long-Displacement Facility
 */
/* in can_use_ldisp_for_lit64() */
    return is_valid_ldisp(jit->lit64 - jit->base_ip);
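/*
 * Editor's note: these predicates drive instruction selection. A short
 * relative branch has a signed 16-bit halfword field (byte range
 * -65536..65534, always even); a long displacement is a signed 20-bit
 * byte field (-524288..524287). Standalone versions of the two checks:
 */
static int fits_short_rel_sketch(long rel_bytes)
{
    return rel_bytes >= -65536 && rel_bytes <= 65534;
}

static int fits_long_disp_sketch(long disp_bytes)
{
    return disp_bytes >= -524288 && disp_bytes <= 524287;
}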
/* in save_regs() */
    u32 off = STK_OFF_R6 + (rs - 6) * 8;

/* in restore_regs() */
    u32 off = STK_OFF_R6 + (rs - 6) * 8;
    if (jit->seen & SEEN_STACK)

/* in get_end() */
    return i - 1;

/*
 * Save and restore clobbered registers (6-15) on stack.
 */
/* in save_restore_regs() */
    u16 seen_regs = jit->seen_regs | extra_regs;
    jit->prg += (last - re + 1) * save_restore_size;
/* in bpf_skip() */
    size -= 6;
    size -= 4;
    size -= 2;
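/*
 * Editor's sketch: bpf_skip() pads a gap of "size" bytes. The decrements
 * above correspond to branching over most of the gap with one 6-byte brcl
 * (or 4-byte brc) and filling the rest with 2-byte "bcr 0,%r0" no-ops.
 * Simplified model (the kernel picks brc vs. brcl via is_valid_rel(), and
 * instruction sizes keep "size" even):
 */
static void skip_sketch(int size)
{
    if (size >= 6)
        size -= 6;      /* one brcl jumping past the gap */
    else if (size >= 4)
        size -= 4;      /* one brc jumping past the gap */
    while (size >= 2)
        size -= 2;      /* one 2-byte no-op */
}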
/* in bpf_jit_plt() */
    plt->ret = ret;
    plt->target = target;
/* in bpf_jit_prologue() */
    /* No-op for hotpatching */
    EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
    jit->prologue_plt_ret = jit->prg;

    jit->tail_call_start = jit->prg;
    if (fp->aux->exception_cb) {
        /* Arrange the restoration of %r6-%r15 in the epilogue. */
        jit->seen_regs |= NVREGS;
    /* (save_restore_regs() call, continuation line:) */
                      fp->aux->exception_boundary ? NVREGS : 0);
    if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
            is_valid_ldisp(jit->size - (jit->prg + 2))) {
            jit->base_ip = jit->prg;
            EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
            jit->base_ip = jit->lit32_start;
    if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
        if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
        /* aghi %r15,-STK_OFF */
        EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
        if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
/* in emit_expoline() */
    EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);

/* in emit_r1_thunk() */
    jit->r1_thunk_ip = jit->prg;

/* in call_r1() */
    EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
/* in bpf_jit_epilogue() */
    jit->exit_ip = jit->prg;
    jit->r14_thunk_ip = jit->prg;
    if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))

    jit->prg = ALIGN(jit->prg, 8);
    jit->prologue_plt = jit->prg;
    if (jit->prg_buf)
        bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
                    jit->prg_buf + jit->prologue_plt_ret, NULL);
    jit->prg += sizeof(struct bpf_plt);
/* in ex_handler_bpf() */
    regs->psw.addr = extable_fixup(x);
    if (x->data != -1)
        regs->gprs[x->data] = 0;
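/*
 * Editor's sketch of what this exception handler does: on a fault in a
 * probe instruction, execution resumes at the fixup address and, unless
 * data is -1 (no register to clear), the destination register is zeroed
 * so the BPF program reads 0 instead of faulting:
 */
static void ex_handler_sketch(unsigned long *psw_addr, unsigned long *gprs,
                              unsigned long fixup, int dst_reg)
{
    *psw_addr = fixup;      /* continue after the faulting instruction */
    if (dst_reg != -1)
        gprs[dst_reg] = 0;  /* the failed load yields 0 */
}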
/* in bpf_jit_probe_init() */
    probe->prg = -1;
    probe->nop_prg = -1;
    probe->reg = -1;
    probe->arena_reg = REG_0;

/* in bpf_jit_probe_emit_nop() */
    if (probe->prg == -1 || probe->nop_prg != -1)
    probe->nop_prg = jit->prg;
/* in bpf_jit_probe_load_pre() */
    if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
        BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
        BPF_MODE(insn->code) != BPF_PROBE_MEM32)

    if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
        EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
        probe->arena_reg = REG_W1;
    probe->prg = jit->prg;
    probe->reg = reg2hex[insn->dst_reg];

/* in bpf_jit_probe_store_pre() */
    if (BPF_MODE(insn->code) != BPF_PROBE_MEM32)

    EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
    probe->arena_reg = REG_W1;
    probe->prg = jit->prg;

/* in bpf_jit_probe_atomic_pre() */
    if (BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)

    EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
    EMIT4(0xb9080000, REG_W1, insn->dst_reg);
    probe->arena_reg = REG_W1;
    probe->prg = jit->prg;
/* in bpf_jit_probe_post() */
    if (probe->prg == -1)
    if (!fp->aux->extable)
    insn = jit->prg_buf + probe->prg;
    if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg))
        /* JIT bug - gap between probe and nop instructions. */
        return -1;
    if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
        /* Verifier bug - not enough entries. */
        return -1;
    ex = &fp->aux->extable[jit->excnt];
        prg = i == 0 ? probe->prg : probe->nop_prg;
        delta = jit->prg_buf + prg - (u8 *)&ex->insn;
            /* JIT bug - code and extable must be close. */
            return -1;
        ex->insn = delta;
    delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup;
        /* JIT bug - landing pad and extable must be close. */
        return -1;
    ex->fixup = delta;
    ex->type = EX_TYPE_BPF;
    ex->data = probe->reg;
    jit->excnt++;
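/*
 * Editor's sketch: s390 extable entries store 32-bit self-relative offsets
 * ("target minus address of the field"), which is why the deltas above are
 * range-checked: code and table must stay within +-2 GiB of each other.
 * A model of the encoding and decoding (field layout illustrative):
 */
#include <stdint.h>

struct exentry_sketch {
    int32_t insn;   /* faulting instruction, relative to &insn */
    int32_t fixup;  /* landing pad, relative to &fixup */
};

static void ex_encode_sketch(struct exentry_sketch *ex,
                             uint8_t *insn_addr, uint8_t *fixup_addr)
{
    /* the kernel WARNs if these differences do not fit in 32 bits */
    ex->insn = (int32_t)(insn_addr - (uint8_t *)&ex->insn);
    ex->fixup = (int32_t)(fixup_addr - (uint8_t *)&ex->fixup);
}

static uint8_t *ex_decode_fixup_sketch(struct exentry_sketch *ex)
{
    return (uint8_t *)&ex->fixup + ex->fixup;
}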
/*
 * Sign-extend the register if necessary
 */
/* in sign_extend() */
    return -1;
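/*
 * Editor's sketch: sign_extend() widens a narrow argument register to
 * 64 bits (on s390 via instructions such as lgbr/lghr/lgfr). The portable
 * equivalent of the operation:
 */
static long long sign_extend_sketch(unsigned long long v, int bits)
{
    int shift = 64 - bits;

    /* left-align the value, then arithmetic-shift it back down */
    return (long long)(v << shift) >> shift;
}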
/*
 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 * stack space for the large switch statement.
 */
/* in bpf_jit_insn() */
    struct bpf_insn *insn = &fp->insnsi[i];
    s32 branch_oc_off = insn->off;
    u32 dst_reg = insn->dst_reg;
    u32 src_reg = insn->src_reg;
    u32 *addrs = jit->addrs;
    s32 imm = insn->imm;
    s16 off = insn->off;

    switch (insn->code) {
    /* BPF_MOV handling dispatches on insn->off: */
    switch (insn->off) {
        /* (address-space-cast handling: conditionally insert the user
         * arena base into the upper half of %dst, then backpatch the
         * branch that skips it) */
        patch_brc = jit->prg;
        EMIT6_IMM(0xc0080000, dst_reg, jit->user_arena >> 32);
        if (jit->prg_buf)
            *(u16 *)(jit->prg_buf + patch_brc + 2) =
                (jit->prg - patch_brc) >> 1;
    switch (insn->off) {

    case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
    case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
    case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
        /* alfi %dst,-imm */
        EMIT6_IMM(0xc20b0000, dst_reg, -imm);
    case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
        if (imm == -0x80000000) {
        /* agfi %dst,-imm */
        EMIT6_IMM(0xc2080000, dst_reg, -imm);
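/*
 * Editor's note: the imm == -0x80000000 special case exists because the
 * subtraction is otherwise recoded as "agfi %dst,-imm", and -INT_MIN does
 * not fit in a signed 32-bit immediate field. Demonstration:
 */
#include <limits.h>
#include <stdio.h>

static void int_min_sketch(void)
{
    long long negated = -(long long)INT_MIN;

    /* 2147483648 > INT_MAX, so it cannot be an agfi immediate */
    printf("%lld\n", negated);
}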
/* in bpf_jit_insn(), continued */

    /* BPF_DIV and BPF_MOD cases (the pattern repeats for each width): */
    int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
    if (BPF_OP(insn->code) == BPF_MOD)
    jit->seen |= SEEN_LITERAL;  /* wide immediates go through the literal
                                 * pool (set at several sites here) */

    case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
    case BPF_ALU64 | BPF_NEG: /* dst = -dst */
    if (!fp->aux->verifier_zext)

    /* every BPF_LDX/BPF_ST/BPF_STX case marks the program (repeated): */
    jit->seen |= SEEN_MEM;

    /* BPF_STX | BPF_ATOMIC */
    bool is32 = BPF_SIZE(insn->code) == BPF_W;
    /* ... but no index register. For the non-arena case, simply use ... */
    switch (insn->imm) {
        (insn->imm & BPF_FETCH) ? src_reg : REG_W0,             \
        if (insn->imm & BPF_FETCH) {                            \
            /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \
    if (load_probe.prg != -1) {
        probe.prg = jit->prg;
    loop_start = jit->prg;
    pr_err("Unknown atomic operation %02x\n", insn->imm);
    return -1;

    /* BPF_JMP | BPF_CALL */
    return -1;
    jit->seen |= SEEN_FUNC;
    /* Sign-extend the kfunc arguments. */
    if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
        return -1;
        for (j = 0; j < m->nr_args; j++) {
                             m->arg_size[j],
                             m->arg_flags[j]))
                return -1;

    /* BPF_JMP | BPF_TAIL_CALL */
    /* if (index >= array->map.max_entries) */
    patch_1_clrj = jit->prg;
        jit->prg);
    if (jit->seen & SEEN_STACK)
    /* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */
    patch_2_clij = jit->prg;
    EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1,
                     2, jit->prg);
    /* prog = array->ptrs[index]; */
    patch_3_brc = jit->prg;
    EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
    /* goto *(prog->bpf_func + tail_call_start); */
    jit->seen |= SEEN_FUNC;
    EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
    EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
    _EMIT4(0x47f01000 + jit->tail_call_start);
    if (jit->prg_buf) {
        *(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
            (jit->prg - patch_1_clrj) >> 1;
        *(u16 *)(jit->prg_buf + patch_2_clij + 2) =
            (jit->prg - patch_2_clij) >> 1;
        *(u16 *)(jit->prg_buf + patch_3_brc + 2) =
            (jit->prg - patch_3_brc) >> 1;
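/*
 * Editor's sketch of the backpatching idiom above: each forward branch
 * records its position (patch_*), and once the target is known the 16-bit
 * halfword displacement is written into the immediate field at offset +2
 * within the instruction. (The kernel uses a direct u16 store on
 * big-endian s390; memcpy stands in for it here.)
 */
#include <stdint.h>
#include <string.h>

static void backpatch_sketch(uint8_t *prg_buf, int branch_prg, int target_prg)
{
    uint16_t rel = (uint16_t)((target_prg - branch_prg) >> 1);

    memcpy(prg_buf + branch_prg + 2, &rel, sizeof(rel));
}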
    /* BPF_JMP | BPF_EXIT */
    last = (i == fp->len - 1) ? 1 : 0;
    if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
        EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
    else
        EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);

    /* conditional jumps */
    if (BPF_CLASS(insn->code) == BPF_JMP32) {
    bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
    is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;  /* (repeated in several
                                                     * jump cases) */

    pr_err("Unknown opcode %02x\n", insn->code);
    return -1;
/*
 * Return whether the new address of the i-th instruction violates no
 * invariant
 */
/* in bpf_is_new_addr_sane() */
    return jit->addrs[i] == jit->prg;   /* codegen pass: nothing may move */
    return jit->addrs[i] >= jit->prg;   /* earlier passes: code may only shrink */

/*
 * Update the address of the i-th instruction
 */
/* in bpf_set_addr() */
    delta = jit->prg - jit->addrs[i];
    /* pad with no-ops when the instruction came out shorter: */
        bpf_skip(jit, -delta);
        return -1;
    jit->addrs[i] = jit->prg;
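/*
 * Editor's note: across passes an instruction's encoding may only shrink,
 * so addrs[i] never increases. During code generation, if an instruction
 * came out shorter than the recorded address allows, the difference is
 * padded with no-ops so every later address stays valid. Sketch:
 */
static void set_addr_sketch(int *addrs, int i, int *prg)
{
    int delta = *prg - addrs[i];    /* <= 0 once sizes have converged */

    if (delta < 0)
        *prg += -delta;             /* stands in for bpf_skip(jit, -delta) */
    addrs[i] = *prg;
}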
/* in bpf_jit_prog() */
    jit->lit32 = jit->lit32_start;
    jit->lit64 = jit->lit64_start;
    jit->prg = 0;
    jit->excnt = 0;

    kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena);
        jit->kern_arena = _EMIT_CONST_U64(kern_arena);
    jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena);

        return -1;
    for (i = 0; i < fp->len; i += insn_count) {
            return -1;
            return -1;

    lit32_size = jit->lit32 - jit->lit32_start;
    lit64_size = jit->lit64 - jit->lit64_start;
    jit->lit32_start = jit->prg;
        jit->lit32_start = ALIGN(jit->lit32_start, 4);
    jit->lit64_start = jit->lit32_start + lit32_size;
        jit->lit64_start = ALIGN(jit->lit64_start, 8);
    jit->size = jit->lit64_start + lit64_size;
    jit->size_prg = jit->prg;
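/*
 * Editor's sketch of the image layout computed above: code first, then the
 * 4-byte-aligned 32-bit literal pool, then the 8-byte-aligned 64-bit pool;
 * the sum is the total image size:
 */
static int layout_sketch(int prg, int lit32_size, int lit64_size)
{
    int lit32_start = prg;
    int lit64_start;

    if (lit32_size)
        lit32_start = (lit32_start + 3) & ~3;   /* ALIGN(x, 4) */
    lit64_start = lit32_start + lit32_size;
    if (lit64_size)
        lit64_start = (lit64_start + 7) & ~7;   /* ALIGN(x, 8) */
    return lit64_start + lit64_size;            /* jit->size */
}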
/* in bpf_jit_prog(), continued */
    if (WARN_ON_ONCE(fp->aux->extable &&
                     jit->excnt != fp->aux->num_exentries))
        /* Verifier bug - too many entries. */
        return -1;
/* in bpf_jit_alloc() */
    for (i = 0; i < fp->len; i++) {
        insn = &fp->insnsi[i];

        if (BPF_CLASS(insn->code) == BPF_STX &&
            BPF_MODE(insn->code) == BPF_PROBE_ATOMIC &&
            (BPF_SIZE(insn->code) == BPF_DW ||
             BPF_SIZE(insn->code) == BPF_W) &&
            insn->imm == BPF_XCHG)
            /*
             * bpf_jit_insn() emits a load and a compare-and-swap,
             */
            fp->aux->num_exentries += 1;
    fp->aux->num_exentries *= 2;
    code_size = roundup(jit->size,
    extable_size = fp->aux->num_exentries *
    header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
    fp->aux->extable = (struct exception_table_entry *)
        (jit->prg_buf + code_size);
/* in bpf_int_jit_compile() */
    u32 stack_depth = round_up(fp->aux->stack_depth, 8);

    if (!fp->jit_requested)
    jit_data = fp->aux->jit_data;
    fp->aux->jit_data = jit_data;
    if (jit_data->ctx.addrs) {
        jit = jit_data->ctx;
        header = jit_data->header;
        pass = jit_data->pass + 1;
    jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
    /*
     * (from the pass-description comment:)
     * - 1/2: Determine clobbered registers
     * - 3: Calculate program size and addrs array
     */
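/*
 * Editor's sketch (illustrative only) of the multi-pass structure the
 * comment above describes: sizing passes run without a buffer, the buffer
 * is allocated from the converged size, and a final pass emits the bytes:
 */
#include <stdlib.h>

struct jit_pass_sketch {
    unsigned char *prg_buf; /* NULL until the final pass */
    int prg, size;
};

static int run_pass_sketch(struct jit_pass_sketch *jit)
{
    /* stands in for bpf_jit_prog(): measure or emit every instruction */
    jit->prg = 8;   /* pretend the program needs 8 bytes */
    return 0;
}

static int compile_sketch(struct jit_pass_sketch *jit)
{
    int pass;

    for (pass = 1; pass <= 3; pass++)   /* sizing passes */
        if (run_pass_sketch(jit))
            return -1;
    jit->size = jit->prg;
    jit->prg_buf = malloc(jit->size);   /* kernel: bpf_jit_binary_alloc() */
    if (!jit->prg_buf)
        return -1;
    jit->prg = 0;
    return run_pass_sketch(jit);        /* code-generation pass */
}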
/* in bpf_int_jit_compile(), continued */
    bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
    if (!fp->is_func || extra_pass) {
    jit_data->header = header;
    jit_data->ctx = jit;
    jit_data->pass = pass;
    fp->bpf_func = (void *) jit.prg_buf;
    fp->jited = 1;
    fp->jited_len = jit.size;
    if (!fp->is_func || extra_pass) {
        fp->aux->jit_data = NULL;
/* in bpf_arch_text_poke() */
        return -EINVAL;
        insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
        return -EINVAL;
    s390_kernel_write(&plt->target, &new_plt.target,
/*
 * From the trampoline comment block: the traced function's address lives at
 * (ctx - 16) and the argument count at (ctx - 8), matching the ip_off and
 * arg_cnt_off stack slots set up below.
 */
/* in invoke_bpf_prog() */
    struct bpf_jit *jit = &tjit->common;
    int cookie_off = tjit->run_ctx_off +
    struct bpf_prog *p = tlink->link.prog;

    /*
     * run_ctx.cookie = tlink->cookie;
     */
    /* %r0 = tlink->cookie */
    load_imm64(jit, REG_W0, tlink->cookie);
    /* la %r3,run_ctx_off(%r15) */
    EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off);
    patch = jit->prg;
    /*
     * retval = bpf_func(args, p->insnsi);
     */
    /* %r1 = p->bpf_func */
    load_imm64(jit, REG_1, (u64)p->bpf_func);
    /* la %r2,bpf_args_off(%r15) */
    EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off);
    /* %r3 = p->insnsi */
    if (!p->jited)
        load_imm64(jit, REG_3, (u64)p->insnsi);
    if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags))
        return -1;
        tjit->retval_off);
    if (jit->prg_buf)
        *(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1;
    /* la %r4,run_ctx_off(%r15) */
    EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off);
/* in alloc_stack() */
    int stack_offset = tjit->stack_size;

    tjit->stack_size += size;
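/*
 * Editor's sketch: alloc_stack() is a bump allocator for trampoline stack
 * slots; it returns the current offset and grows stack_size. This is how
 * each *_off field below receives a distinct slot:
 */
struct tjit_sketch {
    int stack_size;
};

static int alloc_stack_sketch(struct tjit_sketch *tjit, unsigned int size)
{
    int offset = tjit->stack_size;

    tjit->stack_size += size;
    return offset;
}

/* usage, mirroring the code below: retval_off = alloc_stack_sketch(&t, 8); */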
/* ABI uses %r2 - %r6 for parameter passing. */
/* -mfentry generates a 6-byte nop on s390x. */
/* in __arch_prepare_bpf_trampoline() */
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
    struct bpf_jit *jit = &tjit->common;

    nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS);
    nr_stack_args = m->nr_args - nr_reg_args;
        return -ENOTSUPP;
    for (i = 0; i < m->nr_args; i++) {
        if (m->arg_size[i] <= 8)
        else if (m->arg_size[i] <= 16)
            return -ENOTSUPP;

    tjit->stack_size = STACK_FRAME_OVERHEAD;
    tjit->backchain_off = tjit->stack_size - sizeof(u64);
    tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
    tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
    tjit->ip_off = alloc_stack(tjit, sizeof(u64));
    tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64));
    tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
    tjit->retval_off = alloc_stack(tjit, sizeof(u64));
    tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
    tjit->run_ctx_off = alloc_stack(tjit,
    tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
    tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
    tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
    tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;

    /* aghi %r15,-stack_size */
    EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
        tjit->backchain_off);
    _EMIT6(0xd203f000 | tjit->tccnt_off,
           0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
    /* (stmg/lmg of the register arguments, continuation lines:) */
              REG_2 + (nr_reg_args - 1), REG_15,
              tjit->reg_args_off);
    for (i = 0, j = 0; i < m->nr_args; i++) {
            arg = tjit->orig_stack_args_off +
                  (i - MAX_NR_REG_ARGS) * sizeof(u64);
        bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64);
        if (m->arg_size[i] <= 8) {
        tjit->r7_r8_off);
    EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off);

    /* arg_cnt = m->nr_args; */
        tjit->ip_off);
        tjit->arg_cnt_off);

    /* __bpf_tramp_enter(im); */
    /* %r2 = im */
    load_imm64(jit, REG_2, (u64)im);

    for (i = 0; i < fentry->nr_links; i++)
        if (invoke_bpf_prog(tjit, m, fentry->links[i],
            return -EINVAL;
    if (fmod_ret->nr_links) {
        _EMIT6(0xd707f000 | tjit->retval_off,
               0xf000 | tjit->retval_off);
        for (i = 0; i < fmod_ret->nr_links; i++) {
            if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true))
                return -EINVAL;
            tjit->retval_off);
            EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit);

    /* (register-argument restore before calling the traced function:) */
              REG_2 + (nr_reg_args - 1), REG_15,
              tjit->reg_args_off);
           (nr_stack_args * sizeof(u64) - 1) << 16 |
           tjit->stack_args_off,
           0xf000 | tjit->orig_stack_args_off);
    _EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
        tjit->retval_off);
    im->ip_after_call = jit->prg_buf + jit->prg;
    /* brcl 0,im->ip_epilogue */
    EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue);

    tjit->do_fexit = jit->prg;
    for (i = 0; i < fexit->nr_links; i++)
        if (invoke_bpf_prog(tjit, m, fexit->links[i], false))
            return -EINVAL;

    im->ip_epilogue = jit->prg_buf + jit->prg;
    /* __bpf_tramp_exit(im); */
    /* %r2 = im */
    load_imm64(jit, REG_2, (u64)im);
              REG_2 + (nr_reg_args - 1), REG_15,
              tjit->reg_args_off);
        tjit->r7_r8_off);
    EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off);
        tjit->retval_off);
    _EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
           0xf000 | tjit->tccnt_off);
    EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
/* in arch_bpf_trampoline_size() */
    struct bpf_tramp_image im;

    ret = __arch_prepare_bpf_trampoline(&im, &tjit, m, flags,

/* in arch_prepare_bpf_trampoline() */
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
    ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
    if (tjit.common.prg > (char *)image_end - (char *)image)
        return -E2BIG;
    ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
/* in arch_bpf_stack_walk() */
    /*
     * ... to perform a quasi-longjmp. The common code requires a
     * non-zero bp, so pass sp there as well.
     */