Lines matching +full:function +full:-off (arch/loongarch/net/bpf_jit.c)

1 // SPDX-License-Identifier: GPL-2.0-only
20 #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (round_up(stack, 16) - 80)
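A quick check of that macro's arithmetic: 80 bytes is ten 8-byte slots, which matches the save area laid out in build_prologue() below, so the result is the positive offset of the tail_call_cnt_ptr slot from the post-prologue $sp. A standalone sketch, with round_up() reimplemented locally (the "ten slots" reading is an inference from the store sequence, not stated in this listing):

#include <stdio.h>

/* local stand-in for the kernel's round_up() */
#define ROUND_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
/* mirrors BPF_TAIL_CALL_CNT_PTR_STACK_OFF() */
#define TCC_PTR_OFF(stack)	(ROUND_UP(stack, 16) - 80)

int main(void)
{
	/* e.g. a 112-byte frame puts the slot at $sp + 32 */
	printf("%d\n", TCC_PTR_OFF(112));
	return 0;
}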
23 /* return value from in-kernel function, and exit value for eBPF program */
25 /* arguments from eBPF program to in-kernel function */
31 /* callee saved registers that in-kernel function will preserve */
36 /* read-only frame pointer to access stack */
44 const struct bpf_prog *prog = ctx->prog; in prepare_bpf_tail_call_cnt()
51 * std REG_TCC -> LOONGARCH_GPR_SP + store_offset in prepare_bpf_tail_call_cnt()
53 * std REG_TCC -> LOONGARCH_GPR_SP + store_offset in prepare_bpf_tail_call_cnt()
56 * std REG_TCC -> LOONGARCH_GPR_SP + store_offset in prepare_bpf_tail_call_cnt()
65 *store_offset -= sizeof(long); in prepare_bpf_tail_call_cnt()
86 *store_offset -= sizeof(long); in prepare_bpf_tail_call_cnt()
91 *store_offset -= sizeof(long); in prepare_bpf_tail_call_cnt()
99 * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
100 *                            |           $ra           |
101 *                            +-------------------------+
102 *                            |           $fp           |
103 *                            +-------------------------+
104 *                            |           $s0           |
105 *                            +-------------------------+
106 *                            |           $s1           |
107 *                            +-------------------------+
108 *                            |           $s2           |
109 *                            +-------------------------+
110 *                            |           $s3           |
111 *                            +-------------------------+
112 *                            |           $s4           |
113 *                            +-------------------------+
114 *                            |           $s5           |
115 *                            +-------------------------+
116 *                            |      tail_call_cnt      |
117 *                            +-------------------------+
118 *                            |    tail_call_cnt_ptr    |
119 *                            +-------------------------+ <--BPF_REG_FP
120 *                            |  prog->aux->stack_depth |
121 *                            |        (optional)       |
122 * current $sp -------------> +-------------------------+
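The frame size implied by this layout, as a hedged sketch: ten saved 8-byte slots rounded to 16 bytes, plus the program's own 16-byte-aligned stack, which is how the store_offset sequence in build_prologue() below walks the frame. Illustrative only, not the kernel's exact computation:

static int frame_size(int stack_depth)
{
	int save_area = (10 * 8 + 15) & ~15;		/* $ra..$s5 + tcc slots */
	int bpf_stack = (stack_depth + 15) & ~15;	/* round_up(, 16)       */

	return save_area + bpf_stack;			/* ~ ctx->stack_size    */
}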
128 const struct bpf_prog *prog = ctx->prog; in build_prologue()
131 bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); in build_prologue()
154 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust); in build_prologue()
156 store_offset = stack_adjust - sizeof(long); in build_prologue()
159 store_offset -= sizeof(long); in build_prologue()
162 store_offset -= sizeof(long); in build_prologue()
165 store_offset -= sizeof(long); in build_prologue()
168 store_offset -= sizeof(long); in build_prologue()
171 store_offset -= sizeof(long); in build_prologue()
174 store_offset -= sizeof(long); in build_prologue()
177 store_offset -= sizeof(long); in build_prologue()
187 ctx->stack_size = stack_adjust; in build_prologue()
192 int stack_adjust = ctx->stack_size; in __build_epilogue()
195 load_offset = stack_adjust - sizeof(long); in __build_epilogue()
198 load_offset -= sizeof(long); in __build_epilogue()
201 load_offset -= sizeof(long); in __build_epilogue()
204 load_offset -= sizeof(long); in __build_epilogue()
207 load_offset -= sizeof(long); in __build_epilogue()
210 load_offset -= sizeof(long); in __build_epilogue()
213 load_offset -= sizeof(long); in __build_epilogue()
216 load_offset -= sizeof(long); in __build_epilogue()
223 load_offset -= 2 * sizeof(long); in __build_epilogue()
262 int off, tc_ninsn = 0; in emit_bpf_tail_call()
263 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size); in emit_bpf_tail_call()
269 const int idx0 = ctx->idx; in emit_bpf_tail_call()
271 #define cur_offset (ctx->idx - idx0) in emit_bpf_tail_call()
272 #define jmp_offset (tc_ninsn - (cur_offset)) in emit_bpf_tail_call()
279 * if (index >= array->map.max_entries) in emit_bpf_tail_call()
282 tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0]; in emit_bpf_tail_call()
283 off = offsetof(struct bpf_array, map.max_entries); in emit_bpf_tail_call()
284 emit_insn(ctx, ldwu, t1, a1, off); in emit_bpf_tail_call()
302 * prog = array->ptrs[index]; in emit_bpf_tail_call()
307 off = offsetof(struct bpf_array, ptrs); in emit_bpf_tail_call()
308 emit_insn(ctx, ldd, t2, t2, off); in emit_bpf_tail_call()
313 /* goto *(prog->bpf_func + 4); */ in emit_bpf_tail_call()
314 off = offsetof(struct bpf_prog, bpf_func); in emit_bpf_tail_call()
315 emit_insn(ctx, ldd, t3, t2, off); in emit_bpf_tail_call()
322 return -1; in emit_bpf_tail_call()
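Piecing the fragments together, the emitted sequence follows the standard BPF tail-call contract seen in other JITs. A hedged C model of its control flow: the struct types here are stand-ins, and the shape of the counter check is inferred from the tcc_ptr bookkeeping above rather than shown in this listing.

#include <stdint.h>

struct fake_prog  { void *bpf_func; };		/* stand-in types */
struct fake_array { uint32_t max_entries; struct fake_prog *ptrs[32]; };

#define MAX_TAIL_CALL_CNT 33

/* returns the jump target, or NULL to fall through ("out") */
static void *tail_call_target(struct fake_array *arr, uint32_t index,
			      long *tcc_ptr)
{
	if (index >= arr->max_entries)		/* bounds check   */
		return NULL;
	if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)	/* shared counter */
		return NULL;
	if (!arr->ptrs[index])			/* empty slot     */
		return NULL;
	/* +4 skips the target's first insn (its own TCC setup) */
	return (char *)arr->ptrs[index]->bpf_func + 4;
}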
333 const u8 src = regmap[insn->src_reg]; in emit_atomic()
334 const u8 dst = regmap[insn->dst_reg]; in emit_atomic()
335 const s16 off = insn->off; in emit_atomic()
336 const s32 imm = insn->imm; in emit_atomic()
337 const bool isdw = BPF_SIZE(insn->code) == BPF_DW; in emit_atomic()
339 move_imm(ctx, t1, off, false); in emit_atomic()
344 /* lock *(size *)(dst + off) <op>= src */ in emit_atomic()
369 /* src = atomic_fetch_<op>(dst + off, src) */ in emit_atomic()
402 /* src = atomic_xchg(dst + off, src); */ in emit_atomic()
411 /* r0 = atomic_cmpxchg(dst + off, r0, src); */ in emit_atomic()
419 emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4); in emit_atomic()
427 emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6); in emit_atomic()
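For the cmpxchg case, the backward branches above (-4 and -6 instructions) are the ll/sc retry loops. What the loop computes, written as plain single-threaded C (the hardware version is atomic; this model is not):

#include <stdint.h>

/* C model of BPF_CMPXCHG as emitted above: compare *addr with the
 * expected value in r0, store src on match, return the old value
 * (which the JIT places back in BPF r0).
 */
static uint64_t cmpxchg_model(uint64_t *addr, uint64_t r0, uint64_t src)
{
	uint64_t old = *addr;	/* ll.d: load-linked             */
	if (old == r0)		/* bne exits on mismatch         */
		*addr = src;	/* sc.d; beq retries on failure  */
	return old;
}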
446 int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup); in ex_handler_bpf()
447 off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup); in ex_handler_bpf()
449 regs->regs[dst_reg] = 0; in ex_handler_bpf()
450 regs->csr_era = (unsigned long)&ex->fixup - offset; in ex_handler_bpf()
464 if (!ctx->image || !ctx->prog->aux->extable) in add_exception_handler()
467 if (BPF_MODE(insn->code) != BPF_PROBE_MEM && in add_exception_handler()
468 BPF_MODE(insn->code) != BPF_PROBE_MEMSX) in add_exception_handler()
471 if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries)) in add_exception_handler()
472 return -EINVAL; in add_exception_handler()
474 ex = &ctx->prog->aux->extable[ctx->num_exentries]; in add_exception_handler()
475 pc = (unsigned long)&ctx->image[ctx->idx - 1]; in add_exception_handler()
477 offset = pc - (long)&ex->insn; in add_exception_handler()
479 return -ERANGE; in add_exception_handler()
481 ex->insn = offset; in add_exception_handler()
491 offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE); in add_exception_handler()
493 return -ERANGE; in add_exception_handler()
495 ex->type = EX_TYPE_BPF; in add_exception_handler()
496 ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg); in add_exception_handler()
498 ctx->num_exentries++; in add_exception_handler()
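The two relative encodings above are self-consistent: add_exception_handler() stores offset = &ex->fixup - (pc + LOONGARCH_INSN_SIZE), and ex_handler_bpf() resumes at &ex->fixup - offset, which algebraically is pc + LOONGARCH_INSN_SIZE, i.e. the instruction after the faulting load. A toy check of the identity (addresses are arbitrary):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uintptr_t pc = 0x4a0, fixup_addr = 0x9f0;
	const int insn_size = 4;	/* LOONGARCH_INSN_SIZE */

	intptr_t offset = fixup_addr - (pc + insn_size); /* stored value */
	assert(fixup_addr - offset == pc + insn_size);	 /* resume point */
	return 0;
}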
505 u8 tm = -1; in build_insn()
508 int i = insn - ctx->prog->insnsi; in build_insn()
510 const u8 code = insn->code; in build_insn()
514 const u8 src = regmap[insn->src_reg]; in build_insn()
515 const u8 dst = regmap[insn->dst_reg]; in build_insn()
516 const s16 off = insn->off; in build_insn()
517 const s32 imm = insn->imm; in build_insn()
518 const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32; in build_insn()
524 switch (off) { in build_insn()
568 /* dst = dst - src */ in build_insn()
575 /* dst = dst - imm */ in build_insn()
578 if (is_signed_imm12(-imm)) { in build_insn()
579 emit_insn(ctx, addid, dst, dst, -imm); in build_insn()
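The is_signed_imm12(-imm) test above exploits the asymmetric simm12 range [-2048, 2047]: subtracting 2048 is still a single addi.d, while adding 2048 is not. A standalone illustration, with the range check reimplemented locally:

#include <stdio.h>

static int is_signed_imm12(long v) { return v >= -2048 && v <= 2047; }

int main(void)
{
	long imm = 2048;

	/* BPF_SUB imm=2048 -> addi.d dst, dst, -2048 (encodable) */
	printf("sub path: %d\n", is_signed_imm12(-imm));	/* 1 */
	/* BPF_ADD imm=2048 -> needs move_imm + add.d instead     */
	printf("add path: %d\n", is_signed_imm12(imm));		/* 0 */
	return 0;
}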
605 if (!off) { in build_insn()
623 if (!off) { in build_insn()
640 if (!off) { in build_insn()
658 if (!off) { in build_insn()
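The repeated if (!off) tests implement the BPF ISA v4 convention where insn->off selects the division flavor: off == 0 is unsigned BPF_DIV/BPF_MOD, off == 1 the signed BPF_SDIV/BPF_SMOD. A minimal model (src assumed non-zero; the real JIT/verifier handles the zero case separately):

static long long bpf_div_model(long long dst, long long src, int off)
{
	if (!off)	/* BPF_DIV: unsigned */
		return (long long)((unsigned long long)dst /
				   (unsigned long long)src);
	return dst / src;	/* BPF_SDIV: signed */
}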
672 /* dst = -dst */ in build_insn()
801 /* zero-extend 16 bits into 64 bits */ in build_insn()
805 /* zero-extend 32 bits into 64 bits */ in build_insn()
819 /* zero-extend 16 bits into 64 bits */ in build_insn()
833 /* PC += off if dst cond src */ in build_insn()
854 jmp_offset = bpf2la_offset(i, off, ctx); in build_insn()
868 /* PC += off if dst cond imm */ in build_insn()
889 jmp_offset = bpf2la_offset(i, off, ctx); in build_insn()
909 /* PC += off if dst & src */ in build_insn()
912 jmp_offset = bpf2la_offset(i, off, ctx); in build_insn()
919 /* PC += off if dst & imm */ in build_insn()
922 jmp_offset = bpf2la_offset(i, off, ctx); in build_insn()
930 /* PC += off */ in build_insn()
934 jmp_offset = bpf2la_offset(i, off, ctx); in build_insn()
941 /* function call */ in build_insn()
943 ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, in build_insn()
948 if (insn->src_reg == BPF_PSEUDO_CALL) { in build_insn()
949 tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size); in build_insn()
956 if (insn->src_reg != BPF_PSEUDO_CALL) in build_insn()
964 return -EINVAL; in build_insn()
967 /* function return */ in build_insn()
969 if (i == ctx->prog->len - 1) in build_insn()
980 const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm; in build_insn()
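BPF_LD | BPF_DW | BPF_IMM spans two 8-byte BPF instructions; the line above splices the two 32-bit imm halves, and the (u32) cast on the low half is what stops a negative low word from sign-extending into the high half. In isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t lo = (int32_t)0x80000001;	/* negative as s32 */
	int32_t hi = 0x00000001;

	uint64_t imm64 = (uint64_t)hi << 32 | (uint32_t)lo;
	printf("0x%016llx\n", (unsigned long long)imm64);
	/* prints 0x0000000180000001; without the (uint32_t) cast,
	 * sign-extension of lo would clobber the high word */
	return 0;
}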
989 /* dst = *(size *)(src + off) */ in build_insn()
998 /* dst_reg = (s64)*(signed size *)(src_reg + off) */ in build_insn()
1005 sign_extend = BPF_MODE(insn->code) == BPF_MEMSX || in build_insn()
1006 BPF_MODE(insn->code) == BPF_PROBE_MEMSX; in build_insn()
1009 if (is_signed_imm12(off)) { in build_insn()
1011 emit_insn(ctx, ldb, dst, src, off); in build_insn()
1013 emit_insn(ctx, ldbu, dst, src, off); in build_insn()
1015 move_imm(ctx, t1, off, is32); in build_insn()
1023 if (is_signed_imm12(off)) { in build_insn()
1025 emit_insn(ctx, ldh, dst, src, off); in build_insn()
1027 emit_insn(ctx, ldhu, dst, src, off); in build_insn()
1029 move_imm(ctx, t1, off, is32); in build_insn()
1037 if (is_signed_imm12(off)) { in build_insn()
1039 emit_insn(ctx, ldw, dst, src, off); in build_insn()
1041 emit_insn(ctx, ldwu, dst, src, off); in build_insn()
1043 move_imm(ctx, t1, off, is32); in build_insn()
1051 move_imm(ctx, t1, off, is32); in build_insn()
1061 /* *(size *)(dst + off) = imm */ in build_insn()
1069 if (is_signed_imm12(off)) { in build_insn()
1070 emit_insn(ctx, stb, t1, dst, off); in build_insn()
1072 move_imm(ctx, t2, off, is32); in build_insn()
1078 if (is_signed_imm12(off)) { in build_insn()
1079 emit_insn(ctx, sth, t1, dst, off); in build_insn()
1081 move_imm(ctx, t2, off, is32); in build_insn()
1087 if (is_signed_imm12(off)) { in build_insn()
1088 emit_insn(ctx, stw, t1, dst, off); in build_insn()
1089 } else if (is_signed_imm14(off)) { in build_insn()
1090 emit_insn(ctx, stptrw, t1, dst, off); in build_insn()
1092 move_imm(ctx, t2, off, is32); in build_insn()
1098 if (is_signed_imm12(off)) { in build_insn()
1099 emit_insn(ctx, std, t1, dst, off); in build_insn()
1100 } else if (is_signed_imm14(off)) { in build_insn()
1101 emit_insn(ctx, stptrd, t1, dst, off); in build_insn()
1103 move_imm(ctx, t2, off, is32); in build_insn()
1110 /* *(size *)(dst + off) = src */ in build_insn()
1117 if (is_signed_imm12(off)) { in build_insn()
1118 emit_insn(ctx, stb, src, dst, off); in build_insn()
1120 move_imm(ctx, t1, off, is32); in build_insn()
1125 if (is_signed_imm12(off)) { in build_insn()
1126 emit_insn(ctx, sth, src, dst, off); in build_insn()
1128 move_imm(ctx, t1, off, is32); in build_insn()
1133 if (is_signed_imm12(off)) { in build_insn()
1134 emit_insn(ctx, stw, src, dst, off); in build_insn()
1135 } else if (is_signed_imm14(off)) { in build_insn()
1136 emit_insn(ctx, stptrw, src, dst, off); in build_insn()
1138 move_imm(ctx, t1, off, is32); in build_insn()
1143 if (is_signed_imm12(off)) { in build_insn()
1144 emit_insn(ctx, std, src, dst, off); in build_insn()
1145 } else if (is_signed_imm14(off)) { in build_insn()
1146 emit_insn(ctx, stptrd, src, dst, off); in build_insn()
1148 move_imm(ctx, t1, off, is32); in build_insn()
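Every load/store case above picks its addressing form with the same three-tier test, summarized here as a sketch (stptr.w/stptr.d carry a larger 14-bit immediate but exist only for word/doubleword sizes, hence the byte/half cases have just two tiers):

/* tier selection, schematically:
 *
 *   is_signed_imm12(off):  st.b / st.h / st.w / st.d  dst, off
 *   is_signed_imm14(off):  stptr.w / stptr.d          dst, off  (w/d only)
 *   otherwise:             move_imm(tmp, off); stx.*  dst, tmp
 */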
1166 return -EINVAL; in build_insn()
1173 return -E2BIG; in build_insn()
1179 const struct bpf_prog *prog = ctx->prog; in build_body()
1181 for (i = 0; i < prog->len; i++) { in build_body()
1182 const struct bpf_insn *insn = &prog->insnsi[i]; in build_body()
1185 if (ctx->image == NULL) in build_body()
1186 ctx->offset[i] = ctx->idx; in build_body()
1191 if (ctx->image == NULL) in build_body()
1192 ctx->offset[i] = ctx->idx; in build_body()
1199 if (ctx->image == NULL) in build_body()
1200 ctx->offset[i] = ctx->idx; in build_body()
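build_body() records ctx->offset[i] only while ctx->image is NULL: the JIT runs the generator twice, once against a NULL image purely to size the output and build the BPF-insn to native-insn offset table (which the tail-call and branch code above consumes), then again into the real buffer. The shape of that pattern, sketched with a hypothetical emit_one():

#include <stdint.h>
#include <stddef.h>

struct bpf_insn_stub { uint64_t raw; };	/* stand-in for struct bpf_insn */
int emit_one(uint32_t *image, int idx,
	     const struct bpf_insn_stub *insn);	/* hypothetical emitter */

static int run_pass(uint32_t *image, uint32_t *offset,
		    const struct bpf_insn_stub *insns, int len)
{
	int i, idx = 0;

	for (i = 0; i < len; i++) {
		if (image == NULL)	/* pass 1: record layout only */
			offset[i] = idx;
		idx += emit_one(image, idx, &insns[i]);
	}
	return idx;	/* pass-1 result sizes the allocation */
}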
1211 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) in jit_fill_hole()
1220 for (i = 0; i < ctx->idx; i++) { in validate_code()
1221 insn = ctx->image[i]; in validate_code()
1224 return -1; in validate_code()
1233 return -1; in validate_ctx()
1235 if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries)) in validate_ctx()
1236 return -1; in validate_ctx()
1245 return -EFAULT; in emit_jump_and_link()
1284 return ret ? ERR_PTR(-EINVAL) : dst; in bpf_arch_text_copy()
1295 /* Only poking bpf text is supported. Since kernel function entry in bpf_arch_text_poke()
1299 return -ENOTSUPP; in bpf_arch_text_poke()
1306 return -EFAULT; in bpf_arch_text_poke()
1328 return -ENOMEM; in bpf_arch_text_invalidate()
1335 ret = -EINVAL; in bpf_arch_text_invalidate()
1348 emit_insn(ctx, std, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off); in store_args()
1349 args_off -= 8; in store_args()
1358 emit_insn(ctx, ldd, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off); in restore_args()
1359 args_off -= 8; in restore_args()
1368 struct bpf_prog *p = l->link.prog; in invoke_bpf_prog()
1371 if (l->cookie) { in invoke_bpf_prog()
1372 move_imm(ctx, LOONGARCH_GPR_T1, l->cookie, false); in invoke_bpf_prog()
1373 emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off); in invoke_bpf_prog()
1375 emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off); in invoke_bpf_prog()
1381 emit_insn(ctx, addid, LOONGARCH_GPR_A1, LOONGARCH_GPR_FP, -run_ctx_off); in invoke_bpf_prog()
1393 branch = (u32 *)ctx->image + ctx->idx; in invoke_bpf_prog()
1398 emit_insn(ctx, addid, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -args_off); in invoke_bpf_prog()
1399 if (!p->jited) in invoke_bpf_prog()
1400 move_imm(ctx, LOONGARCH_GPR_A1, (const s64)p->insnsi, false); in invoke_bpf_prog()
1401 ret = emit_call(ctx, (const u64)p->bpf_func); in invoke_bpf_prog()
1406 emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); in invoke_bpf_prog()
1407 emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8)); in invoke_bpf_prog()
1411 if (ctx->image) { in invoke_bpf_prog()
1412 int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branch; in invoke_bpf_prog()
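The branch/offset pair above is the emit-placeholder-then-patch idiom: the address of a forward conditional branch is remembered, the code it may skip is emitted, and only then is the byte distance known and written back into the placeholder. Schematically:

/* u32 *branch = (u32 *)ctx->image + ctx->idx;    remember the slot
 *   ... emit the branch placeholder and the skippable body ...
 * int offset = (void *)&ctx->image[ctx->idx] - (void *)branch;
 *   ... re-encode the instruction at *branch with the real offset ...
 */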
1421 emit_insn(ctx, addid, LOONGARCH_GPR_A2, LOONGARCH_GPR_FP, -run_ctx_off); in invoke_bpf_prog()
1432 emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -retval_off); in invoke_bpf_mod_ret()
1433 for (i = 0; i < tl->nr_links; i++) { in invoke_bpf_mod_ret()
1434 invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, run_ctx_off, true); in invoke_bpf_mod_ret()
1435 emit_insn(ctx, ldd, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -retval_off); in invoke_bpf_mod_ret()
1436 branches[i] = (u32 *)ctx->image + ctx->idx; in invoke_bpf_mod_ret()
1452 * Sign-extend the register if necessary
1456 /* ABI requires unsigned char/short to be zero-extended */ in sign_extend()
1497 * FP + 8              [ RA to parent func ] return address to parent in __arch_prepare_bpf_trampoline()
1498 *                                           function in __arch_prepare_bpf_trampoline()
1499 * FP + 0              [ FP of parent func ] frame pointer of parent in __arch_prepare_bpf_trampoline()
1500 *                                           function in __arch_prepare_bpf_trampoline()
1501 * FP - 8              [ T0 to traced func ] return address of traced in __arch_prepare_bpf_trampoline()
1502 *                                           function in __arch_prepare_bpf_trampoline()
1503 * FP - 16             [ FP of traced func ] frame pointer of traced in __arch_prepare_bpf_trampoline()
1504 *                                           function in __arch_prepare_bpf_trampoline()
1506 * FP - retval_off     [ return value      ] BPF_TRAMP_F_CALL_ORIG or in __arch_prepare_bpf_trampoline()
1507 *                                           BPF_TRAMP_F_RET_FENTRY_RET in __arch_prepare_bpf_trampoline()
1510 * FP - args_off       [ arg1              ] in __arch_prepare_bpf_trampoline()
1512 * FP - nargs_off      [ regs count        ] in __arch_prepare_bpf_trampoline()
1514 * FP - ip_off         [ traced func       ] BPF_TRAMP_F_IP_ARG in __arch_prepare_bpf_trampoline()
1516 * FP - run_ctx_off    [ bpf_tramp_run_ctx ] in __arch_prepare_bpf_trampoline()
1518 * FP - sreg_off       [ callee saved reg  ] in __arch_prepare_bpf_trampoline()
1520 * FP - tcc_ptr_off    [ tail_call_cnt_ptr ] in __arch_prepare_bpf_trampoline()
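One plausible way those FP-relative offsets get assigned, sketched top-down. Assumptions: each slot's offset is the running total at allocation time, the tcc_ptr slot is allocated unconditionally here for brevity, and the final size is 16-byte aligned; variable names follow the diagram, not necessarily the kernel's exact code.

static int layout(int nargs, int save_ret, int ip_arg, int run_ctx_sz,
		  int *retval_off, int *args_off, int *nargs_off,
		  int *ip_off, int *run_ctx_off, int *sreg_off,
		  int *tcc_ptr_off)
{
	int sz = 16;			/* T0/RA + FP of traced func */

	if (save_ret) { sz += 16; *retval_off = sz; }	/* A0 + BPF R0 */
	sz += nargs * 8;  *args_off = sz;
	sz += 8;          *nargs_off = sz;
	if (ip_arg) { sz += 8; *ip_off = sz; }
	sz += (run_ctx_sz + 7) & ~7;  *run_ctx_off = sz;
	sz += 8;          *sreg_off = sz;
	sz += 8;          *tcc_ptr_off = sz;

	return (sz + 15) & ~15;		/* final 16-byte alignment */
}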
1523 if (m->nr_args > LOONGARCH_MAX_REG_ARGS) in __arch_prepare_bpf_trampoline()
1524 return -ENOTSUPP; in __arch_prepare_bpf_trampoline()
1527 for (i = 0; i < m->nr_args; i++) { in __arch_prepare_bpf_trampoline()
1528 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) in __arch_prepare_bpf_trampoline()
1529 return -ENOTSUPP; in __arch_prepare_bpf_trampoline()
1533 return -ENOTSUPP; in __arch_prepare_bpf_trampoline()
1545 nargs = m->nr_args; in __arch_prepare_bpf_trampoline()
1579 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size); in __arch_prepare_bpf_trampoline()
1580 emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8); in __arch_prepare_bpf_trampoline()
1581 emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); in __arch_prepare_bpf_trampoline()
1585 * For the trampoline called from function entry, in __arch_prepare_bpf_trampoline()
1586 * the frame of traced function and the frame of in __arch_prepare_bpf_trampoline()
1589 /* RA and FP for parent function */ in __arch_prepare_bpf_trampoline()
1590 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -16); in __arch_prepare_bpf_trampoline()
1595 /* RA and FP for traced function */ in __arch_prepare_bpf_trampoline()
1596 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size); in __arch_prepare_bpf_trampoline()
1597 emit_insn(ctx, std, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8); in __arch_prepare_bpf_trampoline()
1598 emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); in __arch_prepare_bpf_trampoline()
1603 emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off); in __arch_prepare_bpf_trampoline()
1606 emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off); in __arch_prepare_bpf_trampoline()
1608 /* store ip address of the traced function */ in __arch_prepare_bpf_trampoline()
1611 emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -ip_off); in __arch_prepare_bpf_trampoline()
1616 emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -nargs_off); in __arch_prepare_bpf_trampoline()
1620 /* To traced function */ in __arch_prepare_bpf_trampoline()
1635 for (i = 0; i < fentry->nr_links; i++) { in __arch_prepare_bpf_trampoline()
1636 ret = invoke_bpf_prog(ctx, fentry->links[i], args_off, retval_off, in __arch_prepare_bpf_trampoline()
1641 if (fmod_ret->nr_links) { in __arch_prepare_bpf_trampoline()
1642 branches = kcalloc(fmod_ret->nr_links, sizeof(u32 *), GFP_KERNEL); in __arch_prepare_bpf_trampoline()
1644 return -ENOMEM; in __arch_prepare_bpf_trampoline()
1650 restore_args(ctx, m->nr_args, args_off); in __arch_prepare_bpf_trampoline()
1653 emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off); in __arch_prepare_bpf_trampoline()
1658 emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); in __arch_prepare_bpf_trampoline()
1659 emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8)); in __arch_prepare_bpf_trampoline()
1660 im->ip_after_call = ctx->ro_image + ctx->idx; in __arch_prepare_bpf_trampoline()
1666 for (i = 0; ctx->image && i < fmod_ret->nr_links; i++) { in __arch_prepare_bpf_trampoline()
1667 int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branches[i]; in __arch_prepare_bpf_trampoline()
1671 for (i = 0; i < fexit->nr_links; i++) { in __arch_prepare_bpf_trampoline()
1672 ret = invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, run_ctx_off, false); in __arch_prepare_bpf_trampoline()
1678 im->ip_epilogue = ctx->ro_image + ctx->idx; in __arch_prepare_bpf_trampoline()
1686 restore_args(ctx, m->nr_args, args_off); in __arch_prepare_bpf_trampoline()
1689 emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8)); in __arch_prepare_bpf_trampoline()
1692 m->ret_size, m->ret_flags & BTF_FMODEL_SIGNED_ARG); in __arch_prepare_bpf_trampoline()
1694 emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); in __arch_prepare_bpf_trampoline()
1697 emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off); in __arch_prepare_bpf_trampoline()
1700 emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off); in __arch_prepare_bpf_trampoline()
1704 emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8); in __arch_prepare_bpf_trampoline()
1705 emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); in __arch_prepare_bpf_trampoline()
1710 /* trampoline called from function entry */ in __arch_prepare_bpf_trampoline()
1711 emit_insn(ctx, ldd, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8); in __arch_prepare_bpf_trampoline()
1712 emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); in __arch_prepare_bpf_trampoline()
1720 /* return to parent function */ in __arch_prepare_bpf_trampoline()
1723 /* return to traced function */ in __arch_prepare_bpf_trampoline()
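So the trampoline has two distinct exits; a hedged reading of the restore sequence above, with the jump mnemonics assumed rather than shown in this listing:

/* BPF_TRAMP_F_SKIP_FRAME:  restore the $ra saved at entry;  jr $ra
 *       -> return straight to the traced function's caller
 * otherwise:               restore $t0;                     jr $t0
 *       -> resume the traced function just past its fentry pad
 */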
1727 ret = ctx->idx; in __arch_prepare_bpf_trampoline()
1742 size = ro_image_end - ro_image; in arch_prepare_bpf_trampoline()
1745 return -ENOMEM; in arch_prepare_bpf_trampoline()
1751 jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image)); in arch_prepare_bpf_trampoline()
1757 ret = -EINVAL; in arch_prepare_bpf_trampoline()
1801 if (!prog->jit_requested) in bpf_int_jit_compile()
1818 jit_data = prog->aux->jit_data; in bpf_int_jit_compile()
1825 prog->aux->jit_data = jit_data; in bpf_int_jit_compile()
1827 if (jit_data->ctx.offset) { in bpf_int_jit_compile()
1828 ctx = jit_data->ctx; in bpf_int_jit_compile()
1829 image_ptr = jit_data->image; in bpf_int_jit_compile()
1830 header = jit_data->header; in bpf_int_jit_compile()
1839 ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL); in bpf_int_jit_compile()
1845 /* 1. Initial fake pass to compute ctx->idx and set ctx->flags */ in bpf_int_jit_compile()
1854 extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry); in bpf_int_jit_compile()
1874 prog->aux->extable = (void *)image_ptr + prog_size; in bpf_int_jit_compile()
1897 bpf_jit_dump(prog->len, prog_size, 2, ctx.image); in bpf_int_jit_compile()
1902 if (!prog->is_func || extra_pass) { in bpf_int_jit_compile()
1905 if (extra_pass && ctx.idx != jit_data->ctx.idx) { in bpf_int_jit_compile()
1906 pr_err_once("multi-func JIT bug %d != %d\n", in bpf_int_jit_compile()
1907 ctx.idx, jit_data->ctx.idx); in bpf_int_jit_compile()
1917 jit_data->ctx = ctx; in bpf_int_jit_compile()
1918 jit_data->image = image_ptr; in bpf_int_jit_compile()
1919 jit_data->header = header; in bpf_int_jit_compile()
1921 prog->jited = 1; in bpf_int_jit_compile()
1922 prog->jited_len = prog_size; in bpf_int_jit_compile()
1923 prog->bpf_func = (void *)ctx.image; in bpf_int_jit_compile()
1925 if (!prog->is_func || extra_pass) { in bpf_int_jit_compile()
1928 /* offset[prog->len] is the size of program */ in bpf_int_jit_compile()
1929 for (i = 0; i <= prog->len; i++) in bpf_int_jit_compile()
1936 prog->aux->jit_data = NULL; in bpf_int_jit_compile()
1948 prog->bpf_func = NULL; in bpf_int_jit_compile()
1949 prog->jited = 0; in bpf_int_jit_compile()
1950 prog->jited_len = 0; in bpf_int_jit_compile()