1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * eBPF JIT compiler
4 *
5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6 * IBM Corporation
7 *
8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
9 */
10 #include <linux/moduleloader.h>
11 #include <asm/cacheflush.h>
12 #include <asm/asm-compat.h>
13 #include <linux/netdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_vlan.h>
16 #include <linux/kernel.h>
17 #include <linux/memory.h>
18 #include <linux/bpf.h>
19
20 #include <asm/kprobes.h>
21 #include <asm/text-patching.h>
22
23 #include "bpf_jit.h"
24
25 /* These offsets are from bpf prog end and stay the same across progs */
26 static int bpf_jit_ool_stub, bpf_jit_long_branch_stub;
27
28 static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
29 {
30 memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
31 }
32
33 void dummy_tramp(void);
34
35 asm (
36 " .pushsection .text, \"ax\", @progbits ;"
37 " .global dummy_tramp ;"
38 " .type dummy_tramp, @function ;"
39 "dummy_tramp: ;"
40 #ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
41 " blr ;"
42 #else
43 /* LR is always in r11, so we don't need a 'mflr r11' here */
44 " mtctr 11 ;"
45 " mtlr 0 ;"
46 " bctr ;"
47 #endif
48 " .size dummy_tramp, .-dummy_tramp ;"
49 " .popsection ;"
50 );
51
52 void bpf_jit_build_fentry_stubs(u32 *image, struct codegen_context *ctx)
53 {
54 int ool_stub_idx, long_branch_stub_idx;
55
56 /*
57 * Out-of-line stub:
58 * mflr r0
59 * [b|bl] tramp
60 * mtlr r0 // only with CONFIG_PPC_FTRACE_OUT_OF_LINE
61 * b bpf_func + 4
62 */
63 ool_stub_idx = ctx->idx;
64 EMIT(PPC_RAW_MFLR(_R0));
65 EMIT(PPC_RAW_NOP());
66 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
67 EMIT(PPC_RAW_MTLR(_R0));
68 WARN_ON_ONCE(!is_offset_in_branch_range(4 - (long)ctx->idx * 4));
69 EMIT(PPC_RAW_BRANCH(4 - (long)ctx->idx * 4));
70
71 /*
72 * Long branch stub:
73 * .long <dummy_tramp_addr>
74 * mflr r11
75 * bcl 20,31,$+4
76 * mflr r12
77 * ld r12, -8-SZL(r12)
78 * mtctr r12
79 * mtlr r11 // needed to retain ftrace ABI
80 * bctr
81 */
82 if (image)
83 *((unsigned long *)&image[ctx->idx]) = (unsigned long)dummy_tramp;
84 ctx->idx += SZL / 4;
85 long_branch_stub_idx = ctx->idx;
86 EMIT(PPC_RAW_MFLR(_R11));
87 EMIT(PPC_RAW_BCL4());
88 EMIT(PPC_RAW_MFLR(_R12));
89 EMIT(PPC_RAW_LL(_R12, _R12, -8-SZL));
90 EMIT(PPC_RAW_MTCTR(_R12));
91 EMIT(PPC_RAW_MTLR(_R11));
92 EMIT(PPC_RAW_BCTR());
93
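/*
 * Record how far each stub sits from the end of the JITed image. These
 * distances stay the same across programs, so bpf_arch_text_poke() can
 * later locate the stubs without per-prog bookkeeping, roughly:
 *
 *   ool stub         = bpf_func_end - bpf_jit_ool_stub
 *   long branch stub = bpf_func_end - bpf_jit_long_branch_stub
 */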
94 if (!bpf_jit_ool_stub) {
95 bpf_jit_ool_stub = (ctx->idx - ool_stub_idx) * 4;
96 bpf_jit_long_branch_stub = (ctx->idx - long_branch_stub_idx) * 4;
97 }
98 }
99
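/*
 * Emit a branch to the epilogue at exit_addr. If that is out of direct
 * branch range, branch to (or, the first time, lay down) an alternate
 * copy of the epilogue closer to the current instruction instead.
 */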
100 int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
101 {
102 if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
103 PPC_JMP(exit_addr);
104 } else if (ctx->alt_exit_addr) {
105 if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
106 return -1;
107 PPC_JMP(ctx->alt_exit_addr);
108 } else {
109 ctx->alt_exit_addr = ctx->idx * 4;
110 bpf_jit_build_epilogue(image, ctx);
111 }
112
113 return 0;
114 }
115
116 struct powerpc_jit_data {
117 /* address of rw header */
118 struct bpf_binary_header *hdr;
119 /* address of ro final header */
120 struct bpf_binary_header *fhdr;
121 u32 *addrs;
122 u8 *fimage;
123 u32 proglen;
124 struct codegen_context ctx;
125 };
126
127 bool bpf_jit_needs_zext(void)
128 {
129 return true;
130 }
131
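/*
 * Per-CPU private stack layout, as a sketch (assuming PRIV_STACK_GUARD_SZ
 * covers two u64 guard words at each end):
 *
 *   stack_ptr[0..1]                   guard words (low end, overflow)
 *   ...                               usable stack, rounded up to 16 bytes
 *   stack_ptr[underflow_idx..+1]      guard words (high end, underflow)
 *
 * e.g. a hypothetical alloc_size of 16 + 128 + 16 bytes puts underflow_idx
 * at (160 - 16) / 8 = 18. priv_stack_check_guard() reports the prog if
 * either guard was clobbered.
 */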
132 static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
133 {
134 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
135 u64 *stack_ptr;
136
137 for_each_possible_cpu(cpu) {
138 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
139 stack_ptr[0] = PRIV_STACK_GUARD_VAL;
140 stack_ptr[1] = PRIV_STACK_GUARD_VAL;
141 stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
142 stack_ptr[underflow_idx + 1] = PRIV_STACK_GUARD_VAL;
143 }
144 }
145
146 static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
147 struct bpf_prog *fp)
148 {
149 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
150 u64 *stack_ptr;
151
152 for_each_possible_cpu(cpu) {
153 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
154 if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
155 stack_ptr[1] != PRIV_STACK_GUARD_VAL ||
156 stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL ||
157 stack_ptr[underflow_idx + 1] != PRIV_STACK_GUARD_VAL) {
158 pr_err("BPF private stack overflow/underflow detected for prog %s\n",
159 bpf_jit_get_prog_name(fp));
160 break;
161 }
162 }
163 }
164
165 struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *fp)
166 {
167 u32 proglen;
168 u32 alloclen;
169 u8 *image = NULL;
170 u32 *code_base = NULL;
171 u32 *addrs = NULL;
172 struct powerpc_jit_data *jit_data = NULL;
173 struct codegen_context cgctx;
174 int pass;
175 int flen;
176 int priv_stack_alloc_size;
177 void __percpu *priv_stack_ptr = NULL;
178 struct bpf_binary_header *fhdr = NULL;
179 struct bpf_binary_header *hdr = NULL;
180 bool extra_pass = false;
181 u8 *fimage = NULL;
182 u32 *fcode_base = NULL;
183 u32 extable_len;
184 u32 fixup_len;
185
186 if (!fp->jit_requested)
187 return fp;
188
189 jit_data = fp->aux->jit_data;
190 if (!jit_data) {
191 jit_data = kzalloc_obj(*jit_data);
192 if (!jit_data)
193 return fp;
194 fp->aux->jit_data = jit_data;
195 }
196
197 priv_stack_ptr = fp->aux->priv_stack_ptr;
198 if (!priv_stack_ptr && fp->aux->jits_use_priv_stack) {
199 /*
200 * Allocate private stack of size equivalent to
201 * verifier-calculated stack size plus two memory
202 * guard regions to detect private stack overflow
203 * and underflow.
204 */
205 priv_stack_alloc_size = round_up(fp->aux->stack_depth, 16) +
206 2 * PRIV_STACK_GUARD_SZ;
207 priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_size, 16, GFP_KERNEL);
208 if (!priv_stack_ptr)
209 goto out_priv_stack;
210
211 priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_size);
212 fp->aux->priv_stack_ptr = priv_stack_ptr;
213 }
214
215 flen = fp->len;
216 addrs = jit_data->addrs;
217 if (addrs) {
218 cgctx = jit_data->ctx;
219 /*
220 * JIT compiled to a writable location (image/code_base) first.
221 * It is then moved to the readonly final location (fimage/fcode_base)
222 * using instruction patching.
223 */
224 fimage = jit_data->fimage;
225 fhdr = jit_data->fhdr;
226 proglen = jit_data->proglen;
227 hdr = jit_data->hdr;
228 image = (void *)hdr + ((void *)fimage - (void *)fhdr);
229 extra_pass = true;
230 /* During extra pass, ensure index is reset before repopulating extable entries */
231 cgctx.exentry_idx = 0;
232 goto skip_init_ctx;
233 }
234
235 addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
236 if (addrs == NULL)
237 goto out_err;
238
239 memset(&cgctx, 0, sizeof(struct codegen_context));
240 bpf_jit_init_reg_mapping(&cgctx);
241
242 /* Make sure that the stack is quadword aligned. */
243 cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
244 cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
245 cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
246 cgctx.is_subprog = bpf_is_subprog(fp);
247 cgctx.exception_boundary = fp->aux->exception_boundary;
248 cgctx.exception_cb = fp->aux->exception_cb;
249 cgctx.priv_sp = priv_stack_ptr;
250 cgctx.priv_stack_size = 0;
251 if (priv_stack_ptr) {
252 /*
253 * priv_stack_size is required for setting the bpf FP inside
254 * the percpu allocation.
255 * stack_size is set to 0 to prevent allocation on the
256 * general stack and so that offset calculations don't go for
257 * a toss in bpf_jit_stack_offsetof() & bpf_jit_stack_local()
258 */
259 cgctx.priv_stack_size = cgctx.stack_size;
260 cgctx.stack_size = 0;
261 }
262
263 /* Scouting faux-generate pass 0 */
264 if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false))
265 /* We hit something illegal or unsupported. */
266 goto out_err;
267
268 /*
269 * If we have seen a tail call, we need a second pass.
270 * This is because bpf_jit_emit_common_epilogue() is called
271 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
272 * We also need a second pass if we ended up with too large
273 * a program so as to ensure BPF_EXIT branches are in range.
274 */
275 if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
276 cgctx.idx = 0;
277 if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false))
278 goto out_err;
279 }
280
281 bpf_jit_realloc_regs(&cgctx);
282 /*
283 * Pretend to build prologue, given the features we've seen. This will
284 * update cgctx.idx as it pretends to output instructions, then we can
285 * calculate total size from idx.
286 */
287 bpf_jit_build_prologue(NULL, &cgctx);
288 addrs[fp->len] = cgctx.idx * 4;
289 bpf_jit_build_epilogue(NULL, &cgctx);
290
291 fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
292 extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
293
294 proglen = cgctx.idx * 4;
295 alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
296
297 fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
298 bpf_jit_fill_ill_insns);
299 if (!fhdr)
300 goto out_err;
301
302 if (extable_len)
303 fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;
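/*
 * Resulting layout of the allocation, as a sketch (the function
 * descriptor is only present on ELF ABI v1):
 *
 *   fimage: [ func descriptor ][ JITed prog (proglen) ]
 *           [ exception fixup stubs (fixup_len) ][ extable (extable_len) ]
 */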
304
305 skip_init_ctx:
306 code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
307 fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);
308
309 /* Code generation passes 1-2 */
310 for (pass = 1; pass < 3; pass++) {
311 /* Now build the prologue, body code & epilogue for real. */
312 cgctx.idx = 0;
313 cgctx.alt_exit_addr = 0;
314 bpf_jit_build_prologue(code_base, &cgctx);
315 if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
316 extra_pass)) {
317 bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
318 bpf_jit_binary_pack_free(fhdr, hdr);
319 goto out_err;
320 }
321 bpf_jit_build_epilogue(code_base, &cgctx);
322
323 if (bpf_jit_enable > 1)
324 pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
325 proglen - (cgctx.idx * 4), cgctx.seen);
326 }
327
328 if (bpf_jit_enable > 1)
329 /*
330 * Note that we output the base address of the code_base
331 * rather than image, since opcodes are in code_base.
332 */
333 bpf_jit_dump(flen, proglen, pass, code_base);
334
335 #ifdef CONFIG_PPC64_ELF_ABI_V1
336 /* Function descriptor nastiness: Address + TOC */
337 ((u64 *)image)[0] = (u64)fcode_base;
338 ((u64 *)image)[1] = local_paca->kernel_toc;
339 #endif
340
341 if (!fp->is_func || extra_pass) {
342 if (bpf_jit_binary_pack_finalize(fhdr, hdr))
343 goto out_err;
344 }
345
346 fp->bpf_func = (void *)fimage;
347 fp->jited = 1;
348 fp->jited_len = cgctx.idx * 4 + FUNCTION_DESCR_SIZE;
349
350 if (!fp->is_func || extra_pass) {
351 bpf_prog_fill_jited_linfo(fp, addrs);
352 /*
353 * On ABI V1, executable code starts after the function
354 * descriptor, so adjust base accordingly.
355 */
356 bpf_prog_update_insn_ptrs(fp, addrs,
357 (void *)fimage + FUNCTION_DESCR_SIZE);
358
359 out_addrs:
360 if (!image && priv_stack_ptr) {
361 fp->aux->priv_stack_ptr = NULL;
362 free_percpu(priv_stack_ptr);
363 }
364 out_priv_stack:
365 kfree(addrs);
366 kfree(jit_data);
367 fp->aux->jit_data = NULL;
368 } else {
369 jit_data->addrs = addrs;
370 jit_data->ctx = cgctx;
371 jit_data->proglen = proglen;
372 jit_data->fimage = fimage;
373 jit_data->fhdr = fhdr;
374 jit_data->hdr = hdr;
375 }
376
377 return fp;
378
379 out_err:
380 if (extra_pass) {
381 fp->bpf_func = NULL;
382 fp->jited = 0;
383 fp->jited_len = 0;
384 }
385 goto out_addrs;
386 }
387
388 /*
389 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
390 * this function, as this only applies to BPF_PROBE_MEM, for now.
391 */
392 int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
393 struct codegen_context *ctx, int insn_idx, int jmp_off,
394 int dst_reg, u32 code)
395 {
396 off_t offset;
397 unsigned long pc;
398 struct exception_table_entry *ex, *ex_entry;
399 u32 *fixup;
400
401 /* Populate extable entries only in the last pass */
402 if (pass != 2)
403 return 0;
404
405 if (!fp->aux->extable ||
406 WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
407 return -EINVAL;
408
409 /*
410 * Program is first written to image before copying to the
411 * final location (fimage). Accordingly, update in the image first.
412 * As all offsets used are relative, copying as is to the
413 * final location should be alright.
414 */
415 pc = (unsigned long)&image[insn_idx];
416 ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;
417
418 fixup = (void *)ex -
419 (fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
420 (ctx->exentry_idx * BPF_FIXUP_LEN * 4);
421
422 fixup[0] = PPC_RAW_LI(dst_reg, 0);
423 if (BPF_CLASS(code) == BPF_ST || BPF_CLASS(code) == BPF_STX)
424 fixup[0] = PPC_RAW_NOP();
425
426 if (IS_ENABLED(CONFIG_PPC32))
427 fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */
428
429 fixup[BPF_FIXUP_LEN - 1] =
430 PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
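/*
 * The fixup stub for a faulting load now looks roughly like this
 * (32-bit kernels add a second 'li' to clear the high word; for
 * stores the 'li' is a nop instead):
 *
 *   li  dst_reg, 0
 *   b   <instruction following the faulting access>
 */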
431
432 ex_entry = &ex[ctx->exentry_idx];
433
434 offset = pc - (long)&ex_entry->insn;
435 if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
436 return -ERANGE;
437 ex_entry->insn = offset;
438
439 offset = (long)fixup - (long)&ex_entry->fixup;
440 if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
441 return -ERANGE;
442 ex_entry->fixup = offset;
443
444 ctx->exentry_idx++;
445 return 0;
446 }
447
448 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
449 {
450 int err;
451
452 if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
453 return ERR_PTR(-EINVAL);
454
455 mutex_lock(&text_mutex);
456 err = patch_instructions(dst, src, len, false);
457 mutex_unlock(&text_mutex);
458
459 return err ? ERR_PTR(err) : dst;
460 }
461
462 int bpf_arch_text_invalidate(void *dst, size_t len)
463 {
464 u32 insn = BREAKPOINT_INSTRUCTION;
465 int ret;
466
467 if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
468 return -EINVAL;
469
470 mutex_lock(&text_mutex);
471 ret = patch_instructions(dst, &insn, len, true);
472 mutex_unlock(&text_mutex);
473
474 return ret;
475 }
476
477 void bpf_jit_free(struct bpf_prog *fp)
478 {
479 if (fp->jited) {
480 struct powerpc_jit_data *jit_data = fp->aux->jit_data;
481 struct bpf_binary_header *hdr;
482 void __percpu *priv_stack_ptr;
483 int priv_stack_alloc_size;
484
485 /*
486 * If we fail the final pass of JIT (from jit_subprogs),
487 * the program may not be finalized yet. Call finalize here
488 * before freeing it.
489 */
490 if (jit_data) {
491 bpf_jit_binary_pack_finalize(jit_data->fhdr, jit_data->hdr);
492 kvfree(jit_data->addrs);
493 kfree(jit_data);
494 }
495 hdr = bpf_jit_binary_pack_hdr(fp);
496 bpf_jit_binary_pack_free(hdr, NULL);
497 priv_stack_ptr = fp->aux->priv_stack_ptr;
498 if (priv_stack_ptr) {
499 priv_stack_alloc_size = round_up(fp->aux->stack_depth, 16) +
500 2 * PRIV_STACK_GUARD_SZ;
501 priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_size, fp);
502 free_percpu(priv_stack_ptr);
503 }
504 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
505 }
506
507 bpf_prog_unlock_free(fp);
508 }
509
510 bool bpf_jit_supports_exceptions(void)
511 {
512 return IS_ENABLED(CONFIG_PPC64);
513 }
514
515 bool bpf_jit_supports_subprog_tailcalls(void)
516 {
517 return IS_ENABLED(CONFIG_PPC64);
518 }
519
520 bool bpf_jit_supports_kfunc_call(void)
521 {
522 return IS_ENABLED(CONFIG_PPC64);
523 }
524
525 bool bpf_jit_supports_private_stack(void)
526 {
527 return IS_ENABLED(CONFIG_PPC64);
528 }
529
530 bool bpf_jit_supports_fsession(void)
531 {
532 /*
533 * TODO: Remove after validating support
534 * for fsession and trampoline on ppc32.
535 */
536 if (IS_ENABLED(CONFIG_PPC32))
537 return false;
538 return true;
539 }
540
541 bool bpf_jit_supports_arena(void)
542 {
543 return IS_ENABLED(CONFIG_PPC64);
544 }
545
546 bool bpf_jit_supports_far_kfunc_call(void)
547 {
548 return IS_ENABLED(CONFIG_PPC64);
549 }
550
551 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
552 {
553 if (!in_arena)
554 return true;
555 switch (insn->code) {
556 case BPF_STX | BPF_ATOMIC | BPF_H:
557 case BPF_STX | BPF_ATOMIC | BPF_B:
558 case BPF_STX | BPF_ATOMIC | BPF_W:
559 case BPF_STX | BPF_ATOMIC | BPF_DW:
560 if (bpf_atomic_is_load_store(insn))
561 return false;
562 return IS_ENABLED(CONFIG_PPC64);
563 }
564 return true;
565 }
566
567 bool bpf_jit_supports_percpu_insn(void)
568 {
569 return IS_ENABLED(CONFIG_PPC64);
570 }
571
572 bool bpf_jit_inlines_helper_call(s32 imm)
573 {
574 switch (imm) {
575 case BPF_FUNC_get_smp_processor_id:
576 case BPF_FUNC_get_current_task:
577 case BPF_FUNC_get_current_task_btf:
578 return true;
579 default:
580 return false;
581 }
582 }
583
584 void *arch_alloc_bpf_trampoline(unsigned int size)
585 {
586 return bpf_prog_pack_alloc(size, bpf_jit_fill_ill_insns);
587 }
588
589 void arch_free_bpf_trampoline(void *image, unsigned int size)
590 {
591 bpf_prog_pack_free(image, size);
592 }
593
594 int arch_protect_bpf_trampoline(void *image, unsigned int size)
595 {
596 return 0;
597 }
598
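/*
 * Emit the call sequence for one attached prog. A rough sketch of what
 * gets generated (64-bit, jited prog within direct branch range):
 *
 *   <store l->cookie into run_ctx on the trampoline stack>
 *   r25 = p ; r3 = p ; r4 = &run_ctx
 *   bl    bpf_trampoline_enter(p)
 *   r26 = r3                        // prog start time
 *   cmpli r3, 0 ; beq skip          // prog not active
 *   r3 = &prog ctx (regs_off)
 *   bl    p->bpf_func               // direct, or via ctr if out of range
 *   <store r3 at retval_off if save_ret>
 * skip:
 *   r3 = p ; r4 = start time ; r5 = &run_ctx
 *   bl    bpf_trampoline_exit(p)
 */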
599 static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ctx,
600 struct bpf_tramp_link *l, int regs_off, int retval_off,
601 int run_ctx_off, bool save_ret)
602 {
603 struct bpf_prog *p = l->link.prog;
604 ppc_inst_t branch_insn;
605 u32 jmp_idx;
606 int ret = 0;
607
608 /* Save cookie */
609 if (IS_ENABLED(CONFIG_PPC64)) {
610 PPC_LI64(_R3, l->cookie);
611 EMIT(PPC_RAW_STD(_R3, _R1, run_ctx_off + offsetof(struct bpf_tramp_run_ctx,
612 bpf_cookie)));
613 } else {
614 PPC_LI32(_R3, l->cookie >> 32);
615 PPC_LI32(_R4, l->cookie);
616 EMIT(PPC_RAW_STW(_R3, _R1,
617 run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie)));
618 EMIT(PPC_RAW_STW(_R4, _R1,
619 run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie) + 4));
620 }
621
622 /* __bpf_prog_enter(p, &bpf_tramp_run_ctx) */
623 PPC_LI_ADDR(_R3, p);
624 EMIT(PPC_RAW_MR(_R25, _R3));
625 EMIT(PPC_RAW_ADDI(_R4, _R1, run_ctx_off));
626 ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
627 (unsigned long)bpf_trampoline_enter(p));
628 if (ret)
629 return ret;
630
631 /* Remember prog start time returned by __bpf_prog_enter */
632 EMIT(PPC_RAW_MR(_R26, _R3));
633
634 /*
635 * if (__bpf_prog_enter(p) == 0)
636 * goto skip_exec_of_prog;
637 *
638 * Emit a nop to be later patched with conditional branch, once offset is known
639 */
640 EMIT(PPC_RAW_CMPLI(_R3, 0));
641 jmp_idx = ctx->idx;
642 EMIT(PPC_RAW_NOP());
643
644 /* p->bpf_func(ctx) */
645 EMIT(PPC_RAW_ADDI(_R3, _R1, regs_off));
646 if (!p->jited)
647 PPC_LI_ADDR(_R4, (unsigned long)p->insnsi);
648 /* Account for max possible instructions during dummy pass for size calculation */
649 if (image && !create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx],
650 (unsigned long)p->bpf_func,
651 BRANCH_SET_LINK)) {
652 image[ctx->idx] = ppc_inst_val(branch_insn);
653 ctx->idx++;
654 } else {
655 EMIT(PPC_RAW_LL(_R12, _R25, offsetof(struct bpf_prog, bpf_func)));
656 EMIT(PPC_RAW_MTCTR(_R12));
657 EMIT(PPC_RAW_BCTRL());
658 }
659
660 if (save_ret)
661 EMIT(PPC_RAW_STL(_R3, _R1, retval_off));
662
663 /* Fix up branch */
664 if (image) {
665 if (create_cond_branch(&branch_insn, &image[jmp_idx],
666 (unsigned long)&image[ctx->idx], COND_EQ << 16))
667 return -EINVAL;
668 image[jmp_idx] = ppc_inst_val(branch_insn);
669 }
670
671 /* __bpf_prog_exit(p, start_time, &bpf_tramp_run_ctx) */
672 EMIT(PPC_RAW_MR(_R3, _R25));
673 EMIT(PPC_RAW_MR(_R4, _R26));
674 EMIT(PPC_RAW_ADDI(_R5, _R1, run_ctx_off));
675 ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
676 (unsigned long)bpf_trampoline_exit(p));
677
678 return ret;
679 }
680
681 static int invoke_bpf_mod_ret(u32 *image, u32 *ro_image, struct codegen_context *ctx,
682 struct bpf_tramp_links *tl, int regs_off, int retval_off,
683 int run_ctx_off, u32 *branches)
684 {
685 int i;
686
687 /*
688 * The first fmod_ret program will receive a garbage return value.
689 * Set this to 0 to avoid confusing the program.
690 */
691 EMIT(PPC_RAW_LI(_R3, 0));
692 EMIT(PPC_RAW_STL(_R3, _R1, retval_off));
693 for (i = 0; i < tl->nr_links; i++) {
694 if (invoke_bpf_prog(image, ro_image, ctx, tl->links[i], regs_off, retval_off,
695 run_ctx_off, true))
696 return -EINVAL;
697
698 /*
699 * mod_ret prog stored return value after prog ctx. Emit:
700 * if (*(u64 *)(ret_val) != 0)
701 * goto do_fexit;
702 */
703 EMIT(PPC_RAW_LL(_R3, _R1, retval_off));
704 EMIT(PPC_RAW_CMPLI(_R3, 0));
705
706 /*
707 * Save the location of the branch and generate a nop, which is
708 * replaced with a conditional jump once do_fexit (i.e. the
709 * start of the fexit invocation) is finalized.
710 */
711 branches[i] = ctx->idx;
712 EMIT(PPC_RAW_NOP());
713 }
714
715 return 0;
716 }
717
718 /*
719 * Refer __arch_prepare_bpf_trampoline() for stack component details.
720 *
721 * The tailcall count/reference is present in caller's stack frame. The
722 * tail_call_info is saved at the same offset on the trampoline frame
723 * for the traced function (BPF subprog/callee) to fetch it.
724 */
725 static void bpf_trampoline_setup_tail_call_info(u32 *image, struct codegen_context *ctx,
726 int bpf_frame_size, int r4_off)
727 {
728 if (IS_ENABLED(CONFIG_PPC64)) {
729 EMIT(PPC_RAW_LD(_R4, _R1, bpf_frame_size));
730 /* Refer to trampoline's Generated stack layout */
731 EMIT(PPC_RAW_LD(_R3, _R4, -BPF_PPC_TAILCALL));
732
733 /*
734 * Set the tail_call_info in the trampoline's frame depending on
735 * whether the previous frame held a value or a reference.
736 */
737 EMIT(PPC_RAW_CMPLWI(_R3, MAX_TAIL_CALL_CNT));
738 PPC_BCC_CONST_SHORT(COND_GT, 8);
739 EMIT(PPC_RAW_ADDI(_R3, _R4, -BPF_PPC_TAILCALL));
740
741 /*
742 * The trampoline's tail_call_info is at the same offset as that of
743 * any bpf program, relative to the previous frame. Update the
744 * address of main's tail_call_info in the trampoline frame.
745 */
746 EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size - BPF_PPC_TAILCALL));
747 } else {
748 /* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
749 EMIT(PPC_RAW_LL(_R4, _R1, r4_off));
750 }
751 }
752
753 static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_context *ctx,
754 int bpf_frame_size, int r4_off)
755 {
756 if (IS_ENABLED(CONFIG_PPC32)) {
757 /*
758 * Restore tailcall for 32-bit powerpc
759 * See bpf_jit_stack_offsetof() and BPF_PPC_TC
760 */
761 EMIT(PPC_RAW_STL(_R4, _R1, r4_off));
762 }
763 }
764
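/*
 * Per the powerpc ELF ABIs, the first eight arguments arrive in r3-r10;
 * any further arguments were spilled by our caller into its parameter
 * save area, which lives just past this trampoline's frame
 * (bpf_frame_size + STACK_FRAME_MIN_SIZE from the current r1). Gather
 * both sets into the prog ctx at regs_off.
 */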
765 static void bpf_trampoline_save_args(u32 *image, struct codegen_context *ctx,
766 int bpf_frame_size, int nr_regs, int regs_off)
767 {
768 int param_save_area_offset;
769
770 param_save_area_offset = bpf_frame_size;
771 param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */
772
773 for (int i = 0; i < nr_regs; i++) {
774 if (i < 8) {
775 EMIT(PPC_RAW_STL(_R3 + i, _R1, regs_off + i * SZL));
776 } else {
777 EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL));
778 EMIT(PPC_RAW_STL(_R3, _R1, regs_off + i * SZL));
779 }
780 }
781 }
782
783 /* Used when restoring just the register parameters when returning back */
784 static void bpf_trampoline_restore_args_regs(u32 *image, struct codegen_context *ctx,
785 int nr_regs, int regs_off)
786 {
787 for (int i = 0; i < nr_regs && i < 8; i++)
788 EMIT(PPC_RAW_LL(_R3 + i, _R1, regs_off + i * SZL));
789 }
790
791 /* Used when we call into the traced function. Replicate parameter save area */
792 static void bpf_trampoline_restore_args_stack(u32 *image, struct codegen_context *ctx,
793 int bpf_frame_size, int nr_regs, int regs_off)
794 {
795 int param_save_area_offset;
796
797 param_save_area_offset = bpf_frame_size;
798 param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */
799
800 for (int i = 8; i < nr_regs; i++) {
801 EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL));
802 EMIT(PPC_RAW_STL(_R3, _R1, STACK_FRAME_MIN_SIZE + i * SZL));
803 }
804 bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off);
805 }
806
807 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
808 void *rw_image_end, void *ro_image,
809 const struct btf_func_model *m, u32 flags,
810 struct bpf_tramp_links *tlinks,
811 void *func_addr)
812 {
813 int regs_off, func_meta_off, ip_off, run_ctx_off, retval_off;
814 int nvr_off, alt_lr_off, r4_off = 0;
815 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
816 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
817 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
818 int i, ret, nr_regs, retaddr_off, bpf_frame_size = 0;
819 struct codegen_context codegen_ctx, *ctx;
820 int cookie_off, cookie_cnt, cookie_ctx_off;
821 int fsession_cnt = bpf_fsession_cnt(tlinks);
822 u64 func_meta;
823 u32 *image = (u32 *)rw_image;
824 ppc_inst_t branch_insn;
825 u32 *branches = NULL;
826 bool save_ret;
827
828 if (IS_ENABLED(CONFIG_PPC32))
829 return -EOPNOTSUPP;
830
831 nr_regs = m->nr_args;
832 /* Extra registers for struct arguments */
833 for (i = 0; i < m->nr_args; i++)
834 if (m->arg_size[i] > SZL)
835 nr_regs += round_up(m->arg_size[i], SZL) / SZL - 1;
836
837 if (nr_regs > MAX_BPF_FUNC_ARGS)
838 return -EOPNOTSUPP;
839
840 ctx = &codegen_ctx;
841 memset(ctx, 0, sizeof(*ctx));
842
843 /*
844 * Generated stack layout:
845 *
846 * func prev back chain [ back chain ]
847 * [ tail_call_info ] optional - 64-bit powerpc
848 * [ padding ] align stack frame
849 * r4_off [ r4 (tailcallcnt) ] optional - 32-bit powerpc
850 * alt_lr_off [ real lr (ool stub)] optional - actual lr
851 * retaddr_off [ return address ]
852 * [ r26 ]
853 * nvr_off [ r25 ] nvr save area
854 * retval_off [ return value ]
855 * [ reg argN ]
856 * [ ... ]
857 * regs_off [ reg_arg1 ] prog_ctx
858 * func_meta_off [ args count ] ((u64 *)prog_ctx)[-1]
859 * ip_off [ traced function ] ((u64 *)prog_ctx)[-2]
860 * [ stack cookieN ]
861 * [ ... ]
862 * cookie_off [ stack cookie1 ]
863 * run_ctx_off [ bpf_tramp_run_ctx ]
864 * [ reg argN ]
865 * [ ... ]
866 * param_save_area [ reg_arg1 ] min 8 doublewords, per ABI
867 * [ TOC save (64-bit) ] --
868 * [ LR save (64-bit) ] | header
869 * [ LR save (32-bit) ] |
870 * bpf trampoline frame [ back chain 2 ] --
871 *
872 */
873
874 /* Minimum stack frame header */
875 bpf_frame_size = STACK_FRAME_MIN_SIZE;
876
877 /*
878 * Room for parameter save area.
879 *
880 * As per the ABI, this is required if we call into the traced
881 * function (BPF_TRAMP_F_CALL_ORIG):
882 * - if the function takes more than 8 arguments for the rest to spill onto the stack
883 * - or, if the function has variadic arguments
884 * - or, if this function's prototype was not available to the caller
885 *
886 * Reserve space for at least 8 registers for now. This can be optimized later.
887 */
888 bpf_frame_size += (nr_regs > 8 ? nr_regs : 8) * SZL;
889
890 /* Room for struct bpf_tramp_run_ctx */
891 run_ctx_off = bpf_frame_size;
892 bpf_frame_size += round_up(sizeof(struct bpf_tramp_run_ctx), SZL);
893
894 /* room for session cookies */
895 cookie_off = bpf_frame_size;
896 cookie_cnt = bpf_fsession_cookie_cnt(tlinks);
897 bpf_frame_size += cookie_cnt * 8;
898
899 /* Room for IP address argument */
900 ip_off = bpf_frame_size;
901 if (flags & BPF_TRAMP_F_IP_ARG)
902 bpf_frame_size += SZL;
903
904 /* Room for function metadata, arg regs count */
905 func_meta_off = bpf_frame_size;
906 bpf_frame_size += SZL;
907
908 /* Room for arg regs */
909 regs_off = bpf_frame_size;
910 bpf_frame_size += nr_regs * SZL;
911
912 /* Room for return value of func_addr or fentry prog */
913 retval_off = bpf_frame_size;
914 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
915 if (save_ret)
916 bpf_frame_size += SZL;
917
918 /* Room for nvr save area */
919 nvr_off = bpf_frame_size;
920 bpf_frame_size += 2 * SZL;
921
922 /* Save area for return address */
923 retaddr_off = bpf_frame_size;
924 bpf_frame_size += SZL;
925
926 /* Optional save area for actual LR in case of ool ftrace */
927 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
928 alt_lr_off = bpf_frame_size;
929 bpf_frame_size += SZL;
930 }
931
932 if (IS_ENABLED(CONFIG_PPC32)) {
933 if (nr_regs < 2) {
934 r4_off = bpf_frame_size;
935 bpf_frame_size += SZL;
936 } else {
937 r4_off = regs_off + SZL;
938 }
939 }
940
941 /*
942 * Save tailcall count pointer at the same offset on the
943 * stack where subprogs expect it
944 */
945 if ((flags & BPF_TRAMP_F_CALL_ORIG) &&
946 (flags & BPF_TRAMP_F_TAIL_CALL_CTX))
947 bpf_frame_size += BPF_PPC_TAILCALL;
948
949 /* Padding to align stack frame, if any */
950 bpf_frame_size = round_up(bpf_frame_size, SZL * 2);
951
952 /* Save the original return address (held in r0) */
953 EMIT(PPC_RAW_STL(_R0, _R1, PPC_LR_STKOFF));
954
955 /* Create our stack frame */
956 EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_frame_size));
957
958 /* 64-bit: Save TOC and load kernel TOC */
959 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
960 EMIT(PPC_RAW_STD(_R2, _R1, 24));
961 PPC64_LOAD_PACA();
962 }
963
964 /* 32-bit: save tail call count in r4 */
965 if (IS_ENABLED(CONFIG_PPC32) && nr_regs < 2)
966 EMIT(PPC_RAW_STL(_R4, _R1, r4_off));
967
968 bpf_trampoline_save_args(image, ctx, bpf_frame_size, nr_regs, regs_off);
969
970 /* Save our LR/return address */
971 EMIT(PPC_RAW_MFLR(_R3));
972 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
973 EMIT(PPC_RAW_STL(_R3, _R1, alt_lr_off));
974 else
975 EMIT(PPC_RAW_STL(_R3, _R1, retaddr_off));
976
977 /*
978 * Derive IP address of the traced function.
979 * In case of CONFIG_PPC_FTRACE_OUT_OF_LINE or BPF program, LR points to the instruction
980 * after the 'bl' instruction in the OOL stub. Refer to ftrace_init_ool_stub() and
981 * bpf_arch_text_poke() for OOL stub of kernel functions and bpf programs respectively.
982 * Relevant stub sequence:
983 *
984 * bl <tramp>
985 * LR (R3) => mtlr r0
986 * b <func_addr+4>
987 *
988 * Recover kernel function/bpf program address from the unconditional
989 * branch instruction at the end of OOL stub.
990 */
991 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & BPF_TRAMP_F_IP_ARG) {
992 EMIT(PPC_RAW_LWZ(_R4, _R3, 4));
993 EMIT(PPC_RAW_SLWI(_R4, _R4, 6));
994 EMIT(PPC_RAW_SRAWI(_R4, _R4, 6));
995 EMIT(PPC_RAW_ADD(_R3, _R3, _R4));
996 }
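/*
 * A sketch of the arithmetic above: the lwz fetches the 'b <func_addr+4>'
 * instruction one word past LR; slwi/srawi by 6 drop the opcode and
 * sign-extend the LI field, leaving the signed byte displacement (the
 * low AA/LK bits are zero). Since that branch sits at LR + 4 and targets
 * func_addr + 4, adding the displacement to LR yields func_addr itself.
 */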
997
998 if (flags & BPF_TRAMP_F_IP_ARG)
999 EMIT(PPC_RAW_STL(_R3, _R1, ip_off));
1000
1001 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
1002 /* Fake our LR for BPF_TRAMP_F_CALL_ORIG case */
1003 EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
1004 EMIT(PPC_RAW_STL(_R3, _R1, retaddr_off));
1005 }
1006
1007 /* Save function arg regs count -- see bpf_get_func_arg_cnt() */
1008 func_meta = nr_regs;
1009 store_func_meta(image, ctx, func_meta, func_meta_off);
1010
1011 /* Save nv regs */
1012 EMIT(PPC_RAW_STL(_R25, _R1, nvr_off));
1013 EMIT(PPC_RAW_STL(_R26, _R1, nvr_off + SZL));
1014
1015 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1016 PPC_LI_ADDR(_R3, (unsigned long)im);
1017 ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
1018 (unsigned long)__bpf_tramp_enter);
1019 if (ret)
1020 return ret;
1021 }
1022
1023 if (fsession_cnt) {
1024 /*
1025 * Clear all the session cookies' values
1026 * Clear the return value to make sure fentry always gets 0
1027 */
1028 prepare_for_fsession_fentry(image, ctx, cookie_cnt, cookie_off, retval_off);
1029 }
1030
1031 cookie_ctx_off = (regs_off - cookie_off) / 8;
1032
1033 for (i = 0; i < fentry->nr_links; i++) {
1034 if (bpf_prog_calls_session_cookie(fentry->links[i])) {
1035 u64 meta = func_meta | (cookie_ctx_off << BPF_TRAMP_COOKIE_INDEX_SHIFT);
1036
1037 store_func_meta(image, ctx, meta, func_meta_off);
1038 cookie_ctx_off--;
1039 }
1040
1041 if (invoke_bpf_prog(image, ro_image, ctx, fentry->links[i], regs_off, retval_off,
1042 run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET))
1043 return -EINVAL;
1044 }
1045
1046 if (fmod_ret->nr_links) {
1047 branches = kcalloc(fmod_ret->nr_links, sizeof(u32), GFP_KERNEL);
1048 if (!branches)
1049 return -ENOMEM;
1050
1051 if (invoke_bpf_mod_ret(image, ro_image, ctx, fmod_ret, regs_off, retval_off,
1052 run_ctx_off, branches)) {
1053 ret = -EINVAL;
1054 goto cleanup;
1055 }
1056 }
1057
1058 /* Call the traced function */
1059 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1060 /*
1061 * retaddr on trampoline stack points to the correct point in the original function
1062 * with both PPC_FTRACE_OUT_OF_LINE and the traditional ftrace instruction
1063 * sequence
1064 */
1065 EMIT(PPC_RAW_LL(_R3, _R1, retaddr_off));
1066 EMIT(PPC_RAW_MTCTR(_R3));
1067
1068 /* Replicate tail_call_cnt before calling the original BPF prog */
1069 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
1070 bpf_trampoline_setup_tail_call_info(image, ctx, bpf_frame_size, r4_off);
1071
1072 /* Restore args */
1073 bpf_trampoline_restore_args_stack(image, ctx, bpf_frame_size, nr_regs, regs_off);
1074
1075 /* Restore TOC for 64-bit */
1076 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
1077 EMIT(PPC_RAW_LD(_R2, _R1, 24));
1078 EMIT(PPC_RAW_BCTRL());
1079 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
1080 PPC64_LOAD_PACA();
1081
1082 /* Store return value for bpf prog to access */
1083 EMIT(PPC_RAW_STL(_R3, _R1, retval_off));
1084
1085 /* Restore updated tail_call_cnt */
1086 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
1087 bpf_trampoline_restore_tail_call_cnt(image, ctx, bpf_frame_size, r4_off);
1088
1089 /* Reserve space to patch branch instruction to skip fexit progs */
1090 if (ro_image) /* image is NULL for dummy pass */
1091 im->ip_after_call = &((u32 *)ro_image)[ctx->idx];
1092 EMIT(PPC_RAW_NOP());
1093 }
1094
1095 /* Update branches saved in invoke_bpf_mod_ret with address of do_fexit */
1096 for (i = 0; i < fmod_ret->nr_links && image; i++) {
1097 if (create_cond_branch(&branch_insn, &image[branches[i]],
1098 (unsigned long)&image[ctx->idx], COND_NE << 16)) {
1099 ret = -EINVAL;
1100 goto cleanup;
1101 }
1102
1103 image[branches[i]] = ppc_inst_val(branch_insn);
1104 }
1105
1106 /* set the "is_return" flag for fsession */
1107 func_meta |= (1ULL << BPF_TRAMP_IS_RETURN_SHIFT);
1108 if (fsession_cnt)
1109 store_func_meta(image, ctx, func_meta, func_meta_off);
1110
1111 cookie_ctx_off = (regs_off - cookie_off) / 8;
1112
1113 for (i = 0; i < fexit->nr_links; i++) {
1114 if (bpf_prog_calls_session_cookie(fexit->links[i])) {
1115 u64 meta = func_meta | (cookie_ctx_off << BPF_TRAMP_COOKIE_INDEX_SHIFT);
1116
1117 store_func_meta(image, ctx, meta, func_meta_off);
1118 cookie_ctx_off--;
1119 }
1120
1121 if (invoke_bpf_prog(image, ro_image, ctx, fexit->links[i], regs_off, retval_off,
1122 run_ctx_off, false)) {
1123 ret = -EINVAL;
1124 goto cleanup;
1125 }
1126 }
1127
1128 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1129 if (ro_image) /* image is NULL for dummy pass */
1130 im->ip_epilogue = &((u32 *)ro_image)[ctx->idx];
1131 PPC_LI_ADDR(_R3, im);
1132 ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
1133 (unsigned long)__bpf_tramp_exit);
1134 if (ret)
1135 goto cleanup;
1136 }
1137
1138 if (flags & BPF_TRAMP_F_RESTORE_REGS)
1139 bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off);
1140
1141 /* Restore return value of func_addr or fentry prog */
1142 if (save_ret)
1143 EMIT(PPC_RAW_LL(_R3, _R1, retval_off));
1144
1145 /* Restore nv regs */
1146 EMIT(PPC_RAW_LL(_R26, _R1, nvr_off + SZL));
1147 EMIT(PPC_RAW_LL(_R25, _R1, nvr_off));
1148
1149 /* Epilogue */
1150 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
1151 EMIT(PPC_RAW_LD(_R2, _R1, 24));
1152 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
1153 /* Skip the traced function and return to parent */
1154 EMIT(PPC_RAW_ADDI(_R1, _R1, bpf_frame_size));
1155 EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF));
1156 EMIT(PPC_RAW_MTLR(_R0));
1157 EMIT(PPC_RAW_BLR());
1158 } else {
1159 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
1160 EMIT(PPC_RAW_LL(_R0, _R1, alt_lr_off));
1161 EMIT(PPC_RAW_MTLR(_R0));
1162 EMIT(PPC_RAW_ADDI(_R1, _R1, bpf_frame_size));
1163 EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF));
1164 EMIT(PPC_RAW_BLR());
1165 } else {
1166 EMIT(PPC_RAW_LL(_R0, _R1, retaddr_off));
1167 EMIT(PPC_RAW_MTCTR(_R0));
1168 EMIT(PPC_RAW_ADDI(_R1, _R1, bpf_frame_size));
1169 EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF));
1170 EMIT(PPC_RAW_MTLR(_R0));
1171 EMIT(PPC_RAW_BCTR());
1172 }
1173 }
1174
1175 /* Make sure the trampoline generation logic doesn't overflow */
1176 if (image && WARN_ON_ONCE(&image[ctx->idx] > (u32 *)rw_image_end - BPF_INSN_SAFETY)) {
1177 ret = -EFAULT;
1178 goto cleanup;
1179 }
1180 ret = ctx->idx * 4 + BPF_INSN_SAFETY * 4;
1181
1182 cleanup:
1183 kfree(branches);
1184 return ret;
1185 }
1186
1187 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
1188 struct bpf_tramp_links *tlinks, void *func_addr)
1189 {
1190 struct bpf_tramp_image im;
1191 int ret;
1192
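/*
 * Dummy pass with NULL images: nothing is written, but ctx->idx still
 * advances (accounting for worst-case emission), so the return value
 * is the required trampoline size in bytes.
 */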
1193 ret = __arch_prepare_bpf_trampoline(&im, NULL, NULL, NULL, m, flags, tlinks, func_addr);
1194 return ret;
1195 }
1196
1197 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
1198 const struct btf_func_model *m, u32 flags,
1199 struct bpf_tramp_links *tlinks,
1200 void *func_addr)
1201 {
1202 u32 size = image_end - image;
1203 void *rw_image, *tmp;
1204 int ret;
1205
1206 /*
1207 * rw_image doesn't need to be in module memory range, so we can
1208 * use kvmalloc.
1209 */
1210 rw_image = kvmalloc(size, GFP_KERNEL);
1211 if (!rw_image)
1212 return -ENOMEM;
1213
1214 ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
1215 flags, tlinks, func_addr);
1216 if (ret < 0)
1217 goto out;
1218
1219 if (bpf_jit_enable > 1)
1220 bpf_jit_dump(1, ret - BPF_INSN_SAFETY * 4, 1, rw_image);
1221
1222 tmp = bpf_arch_text_copy(image, rw_image, size);
1223 if (IS_ERR(tmp))
1224 ret = PTR_ERR(tmp);
1225
1226 out:
1227 kvfree(rw_image);
1228 return ret;
1229 }
1230
1231 static int bpf_modify_inst(void *ip, ppc_inst_t old_inst, ppc_inst_t new_inst)
1232 {
1233 ppc_inst_t org_inst;
1234
1235 if (copy_inst_from_kernel_nofault(&org_inst, ip)) {
1236 pr_err("0x%lx: fetching instruction failed\n", (unsigned long)ip);
1237 return -EFAULT;
1238 }
1239
1240 if (!ppc_inst_equal(org_inst, old_inst)) {
1241 pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
1242 (unsigned long)ip, ppc_inst_as_ulong(old_inst), ppc_inst_as_ulong(org_inst));
1243 return -EINVAL;
1244 }
1245
1246 if (ppc_inst_equal(old_inst, new_inst))
1247 return 0;
1248
1249 return patch_instruction(ip, new_inst);
1250 }
1251
1252 static void do_isync(void *info __maybe_unused)
1253 {
1254 isync();
1255 }
1256
1257 /*
1258 * A 3-step process for bpf prog entry:
1259 * 1. At bpf prog entry, a single nop/b:
1260 * bpf_func:
1261 * [nop|b] ool_stub
1262 * 2. Out-of-line stub:
1263 * ool_stub:
1264 * mflr r0
1265 * [b|bl] <bpf_prog>/<long_branch_stub>
1266 * mtlr r0 // CONFIG_PPC_FTRACE_OUT_OF_LINE only
1267 * b bpf_func + 4
1268 * 3. Long branch stub:
1269 * long_branch_stub:
1270 * .long <branch_addr>/<dummy_tramp>
1271 * mflr r11
1272 * bcl 20,31,$+4
1273 * mflr r12
1274 * ld r12, -16(r12)
1275 * mtctr r12
1276 * mtlr r11 // needed to retain ftrace ABI
1277 * bctr
1278 *
1279 * dummy_tramp is used to reduce synchronization requirements.
1280 *
1281 * When attaching a bpf trampoline to a bpf prog, we do not need any
1282 * synchronization here since we always have a valid branch target regardless
1283 * of the order in which the above stores are seen. dummy_tramp ensures that
1284 * the long_branch stub goes to a valid destination on other cpus, even when
1285 * the branch to the long_branch stub is seen before the updated trampoline
1286 * address.
1287 *
1288 * However, when detaching a bpf trampoline from a bpf prog, or if changing
1289 * the bpf trampoline address, we need synchronization to ensure that other
1290 * cpus can no longer branch into the older trampoline so that it can be
1291 * safely freed. bpf_tramp_image_put() uses rcu_tasks to ensure all cpus
1292 * make forward progress, but we still need to ensure that other cpus
1293 * execute isync (or some CSI) so that they don't go back into the
1294 * trampoline again.
1295 */
1296 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
1297 enum bpf_text_poke_type new_t, void *old_addr,
1298 void *new_addr)
1299 {
1300 unsigned long bpf_func, bpf_func_end, size, offset;
1301 ppc_inst_t old_inst, new_inst;
1302 int ret = 0, branch_flags;
1303 char name[KSYM_NAME_LEN];
1304
1305 if (IS_ENABLED(CONFIG_PPC32))
1306 return -EOPNOTSUPP;
1307
1308 bpf_func = (unsigned long)ip;
1309
1310 /* We currently only support poking bpf programs */
1311 if (!bpf_address_lookup(bpf_func, &size, &offset, name)) {
1312 pr_err("%s (0x%lx): kernel/modules are not supported\n", __func__, bpf_func);
1313 return -EOPNOTSUPP;
1314 }
1315
1316 /*
1317 * If we are not poking at bpf prog entry, then we are simply patching in/out
1318 * an unconditional branch instruction at im->ip_after_call
1319 */
1320 if (offset) {
1321 if (old_t == BPF_MOD_CALL || new_t == BPF_MOD_CALL) {
1322 pr_err("%s (0x%lx): calls are not supported in bpf prog body\n", __func__,
1323 bpf_func);
1324 return -EOPNOTSUPP;
1325 }
1326 old_inst = ppc_inst(PPC_RAW_NOP());
1327 if (old_addr)
1328 if (create_branch(&old_inst, ip, (unsigned long)old_addr, 0))
1329 return -ERANGE;
1330 new_inst = ppc_inst(PPC_RAW_NOP());
1331 if (new_addr)
1332 if (create_branch(&new_inst, ip, (unsigned long)new_addr, 0))
1333 return -ERANGE;
1334 mutex_lock(&text_mutex);
1335 ret = bpf_modify_inst(ip, old_inst, new_inst);
1336 mutex_unlock(&text_mutex);
1337
1338 /* Make sure all cpus see the new instruction */
1339 smp_call_function(do_isync, NULL, 1);
1340 return ret;
1341 }
1342
1343 bpf_func_end = bpf_func + size;
1344
1345 /* Address of the jmp/call instruction in the out-of-line stub */
1346 ip = (void *)(bpf_func_end - bpf_jit_ool_stub + 4);
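/* The ool stub opens with 'mflr r0'; the patched [b|bl] is the next word, hence the +4 */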
1347
1348 if (!is_offset_in_branch_range((long)ip - 4 - bpf_func)) {
1349 pr_err("%s (0x%lx): bpf prog too large, ool stub out of branch range\n", __func__,
1350 bpf_func);
1351 return -ERANGE;
1352 }
1353
1354 old_inst = ppc_inst(PPC_RAW_NOP());
1355 branch_flags = old_t == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;
1356 if (old_addr) {
1357 if (is_offset_in_branch_range(ip - old_addr))
1358 create_branch(&old_inst, ip, (unsigned long)old_addr, branch_flags);
1359 else
1360 create_branch(&old_inst, ip, bpf_func_end - bpf_jit_long_branch_stub,
1361 branch_flags);
1362 }
1363 new_inst = ppc_inst(PPC_RAW_NOP());
1364 branch_flags = new_t == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;
1365 if (new_addr) {
1366 if (is_offset_in_branch_range(ip - new_addr))
1367 create_branch(&new_inst, ip, (unsigned long)new_addr, branch_flags);
1368 else
1369 create_branch(&new_inst, ip, bpf_func_end - bpf_jit_long_branch_stub,
1370 branch_flags);
1371 }
1372
1373 mutex_lock(&text_mutex);
1374
1375 /*
1376 * 1. Update the address in the long branch stub:
1377 * If new_addr is out of range, we will have to use the long branch stub, so patch new_addr
1378 * here. Otherwise, revert to dummy_tramp, but only if we had patched old_addr here.
1379 */
1380 if ((new_addr && !is_offset_in_branch_range(new_addr - ip)) ||
1381 (old_addr && !is_offset_in_branch_range(old_addr - ip)))
1382 ret = patch_ulong((void *)(bpf_func_end - bpf_jit_long_branch_stub - SZL),
1383 (new_addr && !is_offset_in_branch_range(new_addr - ip)) ?
1384 (unsigned long)new_addr : (unsigned long)dummy_tramp);
1385 if (ret)
1386 goto out;
1387
1388 /* 2. Update the branch/call in the out-of-line stub */
1389 ret = bpf_modify_inst(ip, old_inst, new_inst);
1390 if (ret)
1391 goto out;
1392
1393 /* 3. Update instruction at bpf prog entry */
1394 ip = (void *)bpf_func;
1395 if (!old_addr || !new_addr) {
1396 if (!old_addr) {
1397 old_inst = ppc_inst(PPC_RAW_NOP());
1398 create_branch(&new_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0);
1399 } else {
1400 new_inst = ppc_inst(PPC_RAW_NOP());
1401 create_branch(&old_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0);
1402 }
1403 ret = bpf_modify_inst(ip, old_inst, new_inst);
1404 }
1405
1406 out:
1407 mutex_unlock(&text_mutex);
1408
1409 /*
1410 * Sync only if we are not attaching a trampoline to a bpf prog so the older
1411 * trampoline can be freed safely.
1412 */
1413 if (old_addr)
1414 smp_call_function(do_isync, NULL, 1);
1415
1416 return ret;
1417 }
1418