Lines Matching +full:im +full:-

1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <asm/asm-compat.h>
21 #include <asm/text-patching.h>
48 " .size dummy_tramp, .-dummy_tramp ;"
57 * Out-of-line stub: in bpf_jit_build_fentry_stubs()
63 ool_stub_idx = ctx->idx; in bpf_jit_build_fentry_stubs()
68 WARN_ON_ONCE(!is_offset_in_branch_range(4 - (long)ctx->idx * 4)); in bpf_jit_build_fentry_stubs()
69 EMIT(PPC_RAW_BRANCH(4 - (long)ctx->idx * 4)); in bpf_jit_build_fentry_stubs()
77 * ld r12, -8-SZL(r12) in bpf_jit_build_fentry_stubs()
83 *((unsigned long *)&image[ctx->idx]) = (unsigned long)dummy_tramp; in bpf_jit_build_fentry_stubs()
84 ctx->idx += SZL / 4; in bpf_jit_build_fentry_stubs()
85 long_branch_stub_idx = ctx->idx; in bpf_jit_build_fentry_stubs()
89 EMIT(PPC_RAW_LL(_R12, _R12, -8-SZL)); in bpf_jit_build_fentry_stubs()
95 bpf_jit_ool_stub = (ctx->idx - ool_stub_idx) * 4; in bpf_jit_build_fentry_stubs()
96 bpf_jit_long_branch_stub = (ctx->idx - long_branch_stub_idx) * 4; in bpf_jit_build_fentry_stubs()
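The two values computed at the end record each stub's distance, in bytes, back from the end of the JITed program; bpf_arch_text_poke() later subtracts them from the program end to find the stubs without any per-program bookkeeping. A small userspace sketch of that arithmetic, with the per-stub instruction counts assumed for illustration:

    #include <stdio.h>

    #define SZL sizeof(unsigned long)   /* register width in bytes */

    int main(void)
    {
        unsigned int idx = 0, ool_stub_idx, long_branch_stub_idx;

        ool_stub_idx = idx;          /* out-of-line stub starts here    */
        idx += 2;                    /* assumed: mflr + branch back     */
        idx += SZL / 4;              /* trampoline address slot         */
        long_branch_stub_idx = idx;
        idx += 3;                    /* assumed: load addr, mtctr, bctr */

        /* Byte distances from the program end, as saved above. */
        printf("ool stub: end - %u\n", (idx - ool_stub_idx) * 4);
        printf("long branch: end - %u\n", (idx - long_branch_stub_idx) * 4);
        return 0;
    }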
102 if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) { in bpf_jit_emit_exit_insn()
104 } else if (ctx->alt_exit_addr) { in bpf_jit_emit_exit_insn()
105 if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4)))) in bpf_jit_emit_exit_insn()
106 return -1; in bpf_jit_emit_exit_insn()
107 PPC_JMP(ctx->alt_exit_addr); in bpf_jit_emit_exit_insn()
109 ctx->alt_exit_addr = ctx->idx * 4; in bpf_jit_emit_exit_insn()
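bpf_jit_emit_exit_insn() prefers a direct branch to the epilogue; when that is out of direct-branch range it reuses an alternate exit point inside the program, creating one (by emitting the epilogue inline) the first time it is needed. A self-contained model of just that decision, assuming the powerpc +/-32MB unconditional-branch range and stand-in emit helpers:

    #include <stdio.h>
    #include <stdbool.h>

    struct ctx { unsigned int idx; unsigned long alt_exit_addr; };

    /* powerpc 'b' reaches roughly +/-32MB */
    static bool in_range(long off) { return off >= -0x2000000 && off <= 0x1fffffc; }
    static void emit_branch(struct ctx *c)   { c->idx += 1; }
    static void emit_epilogue(struct ctx *c) { c->idx += 8; }  /* size assumed */

    static int emit_exit(struct ctx *c, unsigned long exit_addr)
    {
        if (!exit_addr || in_range((long)exit_addr - (long)(c->idx * 4))) {
            emit_branch(c);                  /* epilogue reachable */
        } else if (c->alt_exit_addr) {       /* reuse earlier far exit */
            if (!in_range((long)c->alt_exit_addr - (long)(c->idx * 4)))
                return -1;
            emit_branch(c);
        } else {                             /* first far exit: epilogue inline */
            c->alt_exit_addr = c->idx * 4;
            emit_epilogue(c);
        }
        return 0;
    }

    int main(void)
    {
        struct ctx c = { .idx = 0x900000, .alt_exit_addr = 0 };

        emit_exit(&c, 4);                    /* too far: creates alt exit */
        printf("alt exit at byte offset %lu\n", c.alt_exit_addr);
        return 0;
    }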
154 if (!fp->jit_requested) in bpf_int_jit_compile()
166 jit_data = fp->aux->jit_data; in bpf_int_jit_compile()
173 fp->aux->jit_data = jit_data; in bpf_int_jit_compile()
176 flen = fp->len; in bpf_int_jit_compile()
177 addrs = jit_data->addrs; in bpf_int_jit_compile()
179 cgctx = jit_data->ctx; in bpf_int_jit_compile()
185 fimage = jit_data->fimage; in bpf_int_jit_compile()
186 fhdr = jit_data->fhdr; in bpf_int_jit_compile()
187 proglen = jit_data->proglen; in bpf_int_jit_compile()
188 hdr = jit_data->hdr; in bpf_int_jit_compile()
189 image = (void *)hdr + ((void *)fimage - (void *)fhdr); in bpf_int_jit_compile()
206 cgctx.stack_size = round_up(fp->aux->stack_depth, 16); in bpf_int_jit_compile()
207 cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena); in bpf_int_jit_compile()
208 cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena); in bpf_int_jit_compile()
210 /* Scouting faux-generate pass 0 */ in bpf_int_jit_compile()
220 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen. in bpf_int_jit_compile()
239 addrs[fp->len] = cgctx.idx * 4; in bpf_int_jit_compile()
242 fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4; in bpf_int_jit_compile()
243 extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry); in bpf_int_jit_compile()
256 fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len; in bpf_int_jit_compile()
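The image is laid out as function descriptor, program text, fixup code (BPF_FIXUP_LEN instructions per exception entry), then the exception table, and fp->aux->extable is pointed past the first three. A quick sketch of that layout arithmetic; the two constants are assumptions, not the kernel's values:

    #include <stdio.h>

    #define BPF_FIXUP_LEN 3         /* assumed fixup words per entry */
    #define FUNCTION_DESCR_SIZE 16  /* assumed descriptor size       */

    struct exception_table_entry { int insn, fixup; };

    int main(void)
    {
        unsigned int num_exentries = 4, prog_insns = 512;
        unsigned int proglen = prog_insns * 4;
        unsigned int fixup_len = num_exentries * BPF_FIXUP_LEN * 4;
        unsigned int extable_len = num_exentries *
                                   sizeof(struct exception_table_entry);

        /* [ descriptor | program text | fixups | extable ] */
        printf("total image: %u bytes\n",
               FUNCTION_DESCR_SIZE + proglen + fixup_len + extable_len);
        printf("extable at offset %u\n",
               FUNCTION_DESCR_SIZE + proglen + fixup_len);
        return 0;
    }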
262 /* Code generation passes 1-2 */ in bpf_int_jit_compile()
270 bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size)); in bpf_int_jit_compile()
279 proglen - (cgctx.idx * 4), cgctx.seen); in bpf_int_jit_compile()
292 ((u64 *)image)[1] = local_paca->kernel_toc; in bpf_int_jit_compile()
295 fp->bpf_func = (void *)fimage; in bpf_int_jit_compile()
296 fp->jited = 1; in bpf_int_jit_compile()
297 fp->jited_len = cgctx.idx * 4 + FUNCTION_DESCR_SIZE; in bpf_int_jit_compile()
299 if (!fp->is_func || extra_pass) { in bpf_int_jit_compile()
308 fp->aux->jit_data = NULL; in bpf_int_jit_compile()
310 jit_data->addrs = addrs; in bpf_int_jit_compile()
311 jit_data->ctx = cgctx; in bpf_int_jit_compile()
312 jit_data->proglen = proglen; in bpf_int_jit_compile()
313 jit_data->fimage = fimage; in bpf_int_jit_compile()
314 jit_data->fhdr = fhdr; in bpf_int_jit_compile()
315 jit_data->hdr = hdr; in bpf_int_jit_compile()
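Programs with subprogs go through bpf_int_jit_compile() twice; everything the extra pass needs (insn addresses, codegen context, image pointers) is parked in fp->aux->jit_data between the calls and torn down only on the final pass. A reduced userspace model of that save/resume pattern, with the struct shape assumed from the fields the listing touches:

    #include <stdlib.h>

    /* Assumed shape, mirroring the fields the listing touches. */
    struct jit_data {
        unsigned int *addrs;
        unsigned int proglen;
        void *image, *fimage;
    };

    struct prog { int is_func; struct jit_data *jit_data; };

    static int jit_compile(struct prog *fp, int extra_pass)
    {
        struct jit_data *jd = fp->jit_data;

        if (!jd) {                         /* first pass: allocate */
            jd = calloc(1, sizeof(*jd));
            if (!jd)
                return -1;
            fp->jit_data = jd;
        }

        /* ... code generation would happen here ... */

        if (!fp->is_func || extra_pass) {  /* final pass: tear down */
            free(jd->addrs);
            free(jd);
            fp->jit_data = NULL;
        }
        return 0;
    }

    int main(void)
    {
        struct prog fp = { .is_func = 1 };

        jit_compile(&fp, 0);   /* subprog pass: state kept        */
        jit_compile(&fp, 1);   /* extra pass: resumes, then frees */
        return 0;
    }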
342 if (!fp->aux->extable || in bpf_add_extable_entry()
343 WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries)) in bpf_add_extable_entry()
344 return -EINVAL; in bpf_add_extable_entry()
353 ex = (void *)fp->aux->extable - (void *)fimage + (void *)image; in bpf_add_extable_entry()
355 fixup = (void *)ex - in bpf_add_extable_entry()
356 (fp->aux->num_exentries * BPF_FIXUP_LEN * 4) + in bpf_add_extable_entry()
357 (ctx->exentry_idx * BPF_FIXUP_LEN * 4); in bpf_add_extable_entry()
364 fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */ in bpf_add_extable_entry()
366 fixup[BPF_FIXUP_LEN - 1] = in bpf_add_extable_entry()
367 PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]); in bpf_add_extable_entry()
369 ex_entry = &ex[ctx->exentry_idx]; in bpf_add_extable_entry()
371 offset = pc - (long)&ex_entry->insn; in bpf_add_extable_entry()
373 return -ERANGE; in bpf_add_extable_entry()
374 ex_entry->insn = offset; in bpf_add_extable_entry()
376 offset = (long)fixup - (long)&ex_entry->fixup; in bpf_add_extable_entry()
378 return -ERANGE; in bpf_add_extable_entry()
379 ex_entry->fixup = offset; in bpf_add_extable_entry()
381 ctx->exentry_idx++; in bpf_add_extable_entry()
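Each exception_table_entry field stores a 32-bit self-relative offset: the distance from the field's own address to the target, rejected with -ERANGE if it cannot fit (for the insn field it also comes out negative here, since the table sits after the program text). A standalone sketch of the encode/decode round trip:

    #include <stdio.h>
    #include <limits.h>

    struct exception_table_entry { int insn, fixup; };

    /* Encode: distance from the field's own address to the target. */
    static int set_rel(int *field, void *target)
    {
        long off = (long)target - (long)field;

        if (off < INT_MIN || off > INT_MAX)
            return -1;                  /* -ERANGE in the listing */
        *field = (int)off;
        return 0;
    }

    /* Decode: the field's address plus its value recovers the target. */
    static void *get_rel(int *field)
    {
        return (char *)field + *field;
    }

    int main(void)
    {
        static unsigned int text[8];    /* stand-in for JITed insns */
        struct exception_table_entry e;

        if (set_rel(&e.insn, &text[3]))
            return 1;
        printf("round trip ok: %d\n", get_rel(&e.insn) == (void *)&text[3]);
        return 0;
    }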
390 return ERR_PTR(-EINVAL); in bpf_arch_text_copy()
405 return -EINVAL; in bpf_arch_text_invalidate()
416 if (fp->jited) { in bpf_jit_free()
417 struct powerpc_jit_data *jit_data = fp->aux->jit_data; in bpf_jit_free()
426 bpf_jit_binary_pack_finalize(jit_data->fhdr, jit_data->hdr); in bpf_jit_free()
427 kvfree(jit_data->addrs); in bpf_jit_free()
457 switch (insn->code) { in bpf_jit_supports_insn()
488 struct bpf_prog *p = l->link.prog; in invoke_bpf_prog()
495 PPC_LI64(_R3, l->cookie); in invoke_bpf_prog()
499 PPC_LI32(_R3, l->cookie >> 32); in invoke_bpf_prog()
500 PPC_LI32(_R4, l->cookie); in invoke_bpf_prog()
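So on 64-bit the BPF cookie is materialized into r3 in one go, while 32-bit passes it as a register pair, high word in r3 and low word in r4. The split itself is plain integer arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t cookie = 0x1122334455667788ULL;

        /* 32-bit calling convention: 64-bit value in a register pair,
         * high half first, matching the PPC_LI32 pair above. */
        uint32_t r3 = (uint32_t)(cookie >> 32);
        uint32_t r4 = (uint32_t)cookie;

        printf("r3=%#x r4=%#x\n", r3, r4);
        return 0;
    }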
526 jmp_idx = ctx->idx; in invoke_bpf_prog()
529 /* p->bpf_func(ctx) */ in invoke_bpf_prog()
531 if (!p->jited) in invoke_bpf_prog()
532 PPC_LI_ADDR(_R4, (unsigned long)p->insnsi); in invoke_bpf_prog()
534 if (image && !create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx], in invoke_bpf_prog()
535 (unsigned long)p->bpf_func, in invoke_bpf_prog()
537 image[ctx->idx] = ppc_inst_val(branch_insn); in invoke_bpf_prog()
538 ctx->idx++; in invoke_bpf_prog()
551 (unsigned long)&image[ctx->idx], COND_EQ << 16)) in invoke_bpf_prog()
552 return -EINVAL; in invoke_bpf_prog()
578 for (i = 0; i < tl->nr_links; i++) { in invoke_bpf_mod_ret()
579 if (invoke_bpf_prog(image, ro_image, ctx, tl->links[i], regs_off, retval_off, in invoke_bpf_mod_ret()
581 return -EINVAL; in invoke_bpf_mod_ret()
596 branches[i] = ctx->idx; in invoke_bpf_mod_ret()
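jmp_idx earlier, and branches[i] here, record where a placeholder instruction was emitted so it can be back-patched with a conditional branch once the forward target is known. A reduced model of that emit-then-patch two-step; the stored value is a raw byte offset, since encoding a real powerpc bc is beside the point:

    #include <stdio.h>

    #define NOP 0x60000000u   /* powerpc ori 0,0,0 */

    int main(void)
    {
        unsigned int image[16], idx = 0, jmp_idx;

        jmp_idx = idx;              /* remember the branch site */
        image[idx++] = NOP;         /* placeholder              */
        image[idx++] = NOP;         /* ...more emitted code...  */
        image[idx++] = NOP;

        /* Target known now: back-patch the recorded slot. */
        image[jmp_idx] = (idx - jmp_idx) * 4;

        printf("patched offset: %u bytes\n", image[jmp_idx]);
        return 0;
    }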
610 EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset)); in bpf_trampoline_setup_tail_call_cnt()
611 EMIT(PPC_RAW_STL(_R3, _R1, -tailcallcnt_offset)); in bpf_trampoline_setup_tail_call_cnt()
625 EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset)); in bpf_trampoline_restore_tail_call_cnt()
626 EMIT(PPC_RAW_STL(_R3, _R1, func_frame_offset - tailcallcnt_offset)); in bpf_trampoline_restore_tail_call_cnt()
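Setup and restore are mirror images: setup copies the tail-call count from the BPF caller's frame into the trampoline frame, and restore copies it back, so the count survives the trampoline invocation. A toy model of the mirrored copy, with the slot position assumed:

    #include <stdio.h>

    int main(void)
    {
        unsigned long caller_frame[32] = { 0 };
        unsigned long tramp_frame[32]  = { 0 };
        int slot = 4;                   /* assumed tailcallcnt slot */

        caller_frame[slot] = 7;         /* live tail-call count */

        tramp_frame[slot] = caller_frame[slot];   /* setup        */
        /* ... trampoline body runs; caller slot may be reused ... */
        caller_frame[slot] = tramp_frame[slot];   /* restore      */

        printf("count preserved: %lu\n", caller_frame[slot]);
        return 0;
    }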
675 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image, in __arch_prepare_bpf_trampoline() argument
693 return -EOPNOTSUPP; in __arch_prepare_bpf_trampoline()
695 nr_regs = m->nr_args; in __arch_prepare_bpf_trampoline()
697 for (i = 0; i < m->nr_args; i++) in __arch_prepare_bpf_trampoline()
698 if (m->arg_size[i] > SZL) in __arch_prepare_bpf_trampoline()
699 nr_regs += round_up(m->arg_size[i], SZL) / SZL - 1; in __arch_prepare_bpf_trampoline()
702 return -EOPNOTSUPP; in __arch_prepare_bpf_trampoline()
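An argument wider than one register (SZL bytes) consumes several consecutive slots, so each such argument adds round_up(size, SZL)/SZL - 1 extra slots on top of the per-argument one. Checking that arithmetic with an assumed three-argument signature:

    #include <stdio.h>

    #define SZL sizeof(unsigned long)
    #define ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
        /* Assumed signature: (long, 16-byte struct, char) on 64-bit. */
        unsigned long arg_size[] = { SZL, 2 * SZL, 1 };
        int nr_args = 3, nr_regs = nr_args;

        for (int i = 0; i < nr_args; i++)
            if (arg_size[i] > SZL)
                nr_regs += ROUND_UP(arg_size[i], SZL) / SZL - 1;

        printf("nr_regs = %d\n", nr_regs);   /* 4: struct takes 2 slots */
        return 0;
    }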
712 * bpf prog redzone/tailcallcnt [ ... ] 64 bytes (64-bit powerpc) in __arch_prepare_bpf_trampoline()
713 * [ ] -- in __arch_prepare_bpf_trampoline()
714 * LR save area [ r0 save (64-bit) ] | header in __arch_prepare_bpf_trampoline()
715 * [ r0 save (32-bit) ] | in __arch_prepare_bpf_trampoline()
716 * dummy frame for unwind [ back chain 1 ] -- in __arch_prepare_bpf_trampoline()
718 * r4_off [ r4 (tailcallcnt) ] optional - 32-bit powerpc in __arch_prepare_bpf_trampoline()
719 * alt_lr_off [ real lr (ool stub)] optional - actual lr in __arch_prepare_bpf_trampoline()
733 * [ TOC save (64-bit) ] -- in __arch_prepare_bpf_trampoline()
734 * [ LR save (64-bit) ] | header in __arch_prepare_bpf_trampoline()
735 * [ LR save (32-bit) ] | in __arch_prepare_bpf_trampoline()
736 * bpf trampoline frame [ back chain 2 ] -- in __arch_prepare_bpf_trampoline()
748 * - if the function takes more than 8 arguments, in which case the rest spill onto the stack in __arch_prepare_bpf_trampoline()
749 * - or, if the function has variadic arguments in __arch_prepare_bpf_trampoline()
750 * - or, if this function's prototype was not available to the caller in __arch_prepare_bpf_trampoline()
801 /* Dummy frame size for proper unwind - includes the 64-byte red zone for 64-bit powerpc */ in __arch_prepare_bpf_trampoline()
810 EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_dummy_frame_size)); in __arch_prepare_bpf_trampoline()
813 EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_frame_size)); in __arch_prepare_bpf_trampoline()
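PPC_RAW_STLU is a store-with-update: one instruction writes the old stack pointer at the new stack top (the back chain) and lowers r1, so the two stores above chain the dummy frame and the trampoline frame for the unwinder. A C model of the linking, with frame sizes in word units and chosen arbitrarily:

    #include <stdio.h>

    int main(void)
    {
        unsigned long stack[64];
        unsigned long *r1 = &stack[64];       /* stack grows downward */
        unsigned long *old;

        /* 'stdu r1,-N(r1)' modelled in word units: link the old top
         * as the back chain, then lower r1. */
        old = r1; r1 -= 8;  *r1 = (unsigned long)old;   /* dummy frame */
        old = r1; r1 -= 16; *r1 = (unsigned long)old;   /* tramp frame */

        /* An unwinder walks back up by following the chains. */
        unsigned long *bc = (unsigned long *)*r1;
        printf("back chain links to dummy frame: %d\n", bc == r1 + 16);
        return 0;
    }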
815 /* 64-bit: Save TOC and load kernel TOC */ in __arch_prepare_bpf_trampoline()
821 /* 32-bit: save tail call count in r4 */ in __arch_prepare_bpf_trampoline()
854 /* Save function arg count -- see bpf_get_func_arg_cnt() */ in __arch_prepare_bpf_trampoline()
863 PPC_LI_ADDR(_R3, (unsigned long)im); in __arch_prepare_bpf_trampoline()
870 for (i = 0; i < fentry->nr_links; i++) in __arch_prepare_bpf_trampoline()
871 if (invoke_bpf_prog(image, ro_image, ctx, fentry->links[i], regs_off, retval_off, in __arch_prepare_bpf_trampoline()
873 return -EINVAL; in __arch_prepare_bpf_trampoline()
875 if (fmod_ret->nr_links) { in __arch_prepare_bpf_trampoline()
876 branches = kcalloc(fmod_ret->nr_links, sizeof(u32), GFP_KERNEL); in __arch_prepare_bpf_trampoline()
878 return -ENOMEM; in __arch_prepare_bpf_trampoline()
882 ret = -EINVAL; in __arch_prepare_bpf_trampoline()
904 /* Restore TOC for 64-bit */ in __arch_prepare_bpf_trampoline()
920 im->ip_after_call = &((u32 *)ro_image)[ctx->idx]; in __arch_prepare_bpf_trampoline()
925 for (i = 0; i < fmod_ret->nr_links && image; i++) { in __arch_prepare_bpf_trampoline()
927 (unsigned long)&image[ctx->idx], COND_NE << 16)) { in __arch_prepare_bpf_trampoline()
928 ret = -EINVAL; in __arch_prepare_bpf_trampoline()
935 for (i = 0; i < fexit->nr_links; i++) in __arch_prepare_bpf_trampoline()
936 if (invoke_bpf_prog(image, ro_image, ctx, fexit->links[i], regs_off, retval_off, in __arch_prepare_bpf_trampoline()
938 ret = -EINVAL; in __arch_prepare_bpf_trampoline()
944 im->ip_epilogue = &((u32 *)ro_image)[ctx->idx]; in __arch_prepare_bpf_trampoline()
945 PPC_LI_ADDR(_R3, im); in __arch_prepare_bpf_trampoline()
990 if (image && WARN_ON_ONCE(&image[ctx->idx] > (u32 *)rw_image_end - BPF_INSN_SAFETY)) { in __arch_prepare_bpf_trampoline()
991 ret = -EFAULT; in __arch_prepare_bpf_trampoline()
994 ret = ctx->idx * 4 + BPF_INSN_SAFETY * 4; in __arch_prepare_bpf_trampoline()
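This return value is the contract between the dry run and the real emission: arch_bpf_trampoline_size() below runs the generator with no image so only ctx->idx advances, and the reported size pads that by BPF_INSN_SAFETY instructions, which the emission pass must then not consume (the WARN_ON_ONCE above). A reduced model of the measure-then-emit flow, headroom value assumed:

    #include <stdio.h>
    #include <stdlib.h>

    #define BPF_INSN_SAFETY 64   /* assumed headroom, in instructions */

    /* With image == NULL this is a dry run: only idx advances. */
    static int generate(unsigned int *image, unsigned int *idx)
    {
        for (int i = 0; i < 100; i++) {      /* pretend body: 100 insns */
            if (image)
                image[*idx] = 0x60000000u;   /* powerpc nop */
            (*idx)++;
        }
        return (int)(*idx * 4 + BPF_INSN_SAFETY * 4);
    }

    int main(void)
    {
        unsigned int idx = 0;
        int size = generate(NULL, &idx);     /* pass 1: measure */
        unsigned int *image = calloc(1, (size_t)size);

        if (!image)
            return 1;
        idx = 0;
        generate(image, &idx);               /* pass 2: emit */
        printf("emitted %u insns into %d bytes\n", idx, size);
        free(image);
        return 0;
    }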
1004 struct bpf_tramp_image im; in arch_bpf_trampoline_size() local
1007 ret = __arch_prepare_bpf_trampoline(&im, NULL, NULL, NULL, m, flags, tlinks, func_addr); in arch_bpf_trampoline_size()
1011 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, in arch_prepare_bpf_trampoline() argument
1016 u32 size = image_end - image; in arch_prepare_bpf_trampoline()
1026 return -ENOMEM; in arch_prepare_bpf_trampoline()
1028 ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m, in arch_prepare_bpf_trampoline()
1034 bpf_jit_dump(1, ret - BPF_INSN_SAFETY * 4, 1, rw_image); in arch_prepare_bpf_trampoline()
1051 return -EFAULT; in bpf_modify_inst()
1057 return -EINVAL; in bpf_modify_inst()
1072 * A 3-step process for bpf prog entry:
1076 * 2. Out-of-line stub:
1088 * ld r12, -16(r12)
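The 'ld r12, -16(r12)' is the crux of step 2: the trampoline address lives in a data slot just below the out-of-line stub, so the stub loads whatever patching last stored there and jumps through it, with dummy_tramp as the default. A userspace model of that slot-before-code indirection, with the layout reduced to a struct:

    #include <stdio.h>

    static void default_tramp(void) { puts("dummy_tramp: nothing attached"); }
    static void real_tramp(void)    { puts("real trampoline"); }

    /* Model: an address slot sits just before the stub's code; entry
     * loads from that fixed negative offset and jumps through it. */
    struct stub { void (*slot)(void); /* stub instructions follow */ };

    static void enter(struct stub *s) { s->slot(); }

    int main(void)
    {
        struct stub s = { .slot = default_tramp };

        enter(&s);                 /* unpatched: default target   */
        s.slot = real_tramp;       /* the patch_ulong() analogue  */
        enter(&s);                 /* now reaches the trampoline  */
        return 0;
    }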
1119 return -EOPNOTSUPP; in bpf_arch_text_poke()
1127 return -EOPNOTSUPP; in bpf_arch_text_poke()
1132 * an unconditional branch instruction at im->ip_after_call in bpf_arch_text_poke()
1138 return -EOPNOTSUPP; in bpf_arch_text_poke()
1143 return -ERANGE; in bpf_arch_text_poke()
1147 return -ERANGE; in bpf_arch_text_poke()
1159 /* Address of the jmp/call instruction in the out-of-line stub */ in bpf_arch_text_poke()
1160 ip = (void *)(bpf_func_end - bpf_jit_ool_stub + 4); in bpf_arch_text_poke()
1162 if (!is_offset_in_branch_range((long)ip - 4 - bpf_func)) { in bpf_arch_text_poke()
1165 return -ERANGE; in bpf_arch_text_poke()
1170 if (is_offset_in_branch_range(ip - old_addr)) in bpf_arch_text_poke()
1173 create_branch(&old_inst, ip, bpf_func_end - bpf_jit_long_branch_stub, in bpf_arch_text_poke()
1178 if (is_offset_in_branch_range(ip - new_addr)) in bpf_arch_text_poke()
1181 create_branch(&new_inst, ip, bpf_func_end - bpf_jit_long_branch_stub, in bpf_arch_text_poke()
1192 if ((new_addr && !is_offset_in_branch_range(new_addr - ip)) || in bpf_arch_text_poke()
1193 (old_addr && !is_offset_in_branch_range(old_addr - ip))) in bpf_arch_text_poke()
1194 ret = patch_ulong((void *)(bpf_func_end - bpf_jit_long_branch_stub - SZL), in bpf_arch_text_poke()
1195 (new_addr && !is_offset_in_branch_range(new_addr - ip)) ? in bpf_arch_text_poke()
1200 /* 2. Update the branch/call in the out-of-line stub */ in bpf_arch_text_poke()
1210 create_branch(&new_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0); in bpf_arch_text_poke()
1213 create_branch(&old_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0); in bpf_arch_text_poke()
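For both patch sites, the expected old instruction and the desired new one are constructed the same way (nop when no address, direct branch when in range, branch into the long-branch stub otherwise), so the write can be verified against what is supposed to be there and fail on concurrent modification (the -EFAULT path in bpf_modify_inst above). A toy model of that verify-then-write step; the branch encoding is an assumed example:

    #include <stdio.h>

    /* Checked patch: refuse unless the word currently at the site is
     * exactly the instruction the caller expected. */
    static int modify_inst(unsigned int *ip, unsigned int old, unsigned int repl)
    {
        if (*ip != old)
            return -1;             /* concurrent patching detected */
        *ip = repl;
        return 0;
    }

    int main(void)
    {
        unsigned int site = 0x60000000u;   /* nop: nothing attached */
        unsigned int branch = 0x48000010u; /* assumed: 'b .+16'     */

        printf("attach: %d\n", modify_inst(&site, 0x60000000u, branch));
        printf("stale:  %d\n", modify_inst(&site, 0x60000000u, branch));
        return 0;
    }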