1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * bpf_jit_comp64.c: eBPF JIT compiler
4 *
5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6 * IBM Corporation
7 *
8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
9 */
10 #include <linux/moduleloader.h>
11 #include <asm/cacheflush.h>
12 #include <asm/asm-compat.h>
13 #include <linux/netdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_vlan.h>
16 #include <asm/kprobes.h>
17 #include <linux/bpf.h>
18 #include <asm/security_features.h>
19
20 #include "bpf_jit.h"
21
22 /*
23 * Stack layout with frame:
24 * Layout when setting up our own stack frame.
25 * Note: r1 at bottom, component offsets positive wrt r1.
26 * Ensure the top half (up to local_tmp_var) stays consistent
27 * with our redzone usage.
28 *
29 * tail_call_info - in the main program's frame, holds the tail call
30 * count; in a subprog's frame, holds a reference to
31 * the main frame's tail_call_info.
32 *
33 * [ prev sp ] <-------------
34 * [ tail_call_info ] 8 |
35 * [ nv gpr save area ] (6 * 8) |
36 * [ addl. nv gpr save area] (12 * 8) | <--- exception boundary/callback program
37 * [ local_tmp_var ] 24 |
38 * fp (r31) --> [ ebpf stack space ] up to 512 |
39 * [ frame header ] 32/112 |
40 * sp (r1) ---> [ stack pointer ] --------------
41 *
42 * Additional (12 * 8) in 'nv gpr save area' only in case of
43 * exception boundary/callback.
44 */
45
46 /* BPF non-volatile registers save area size */
47 #define BPF_PPC_STACK_SAVE (6 * 8)
48
49 /* for bpf JIT code internal usage */
50 #define BPF_PPC_STACK_LOCALS 24
51 /*
52 * for additional non-volatile registers (r14-r25) to be saved
53 * at exception boundary
54 */
55 #define BPF_PPC_EXC_STACK_SAVE (12 * 8)
56
57 /* stack frame excluding BPF stack, ensure this is quadword aligned */
58 #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
59 BPF_PPC_STACK_LOCALS + \
60 BPF_PPC_STACK_SAVE + \
61 BPF_PPC_TAILCALL)
62
63 /*
64 * same as BPF_PPC_STACKFRAME with save area for additional
65 * non volatile registers saved at exception boundary.
66 * This is quad-word aligned.
67 */
68 #define BPF_PPC_EXC_STACKFRAME (BPF_PPC_STACKFRAME + BPF_PPC_EXC_STACK_SAVE)
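/*
 * For illustration (a sketch, assuming the ELFv2 STACK_FRAME_MIN_SIZE of 32
 * and a BPF_PPC_TAILCALL slot of 8 bytes as shown in the layouts above):
 *   BPF_PPC_STACKFRAME     = 32 + 24 + 48 + 8 = 112
 *   BPF_PPC_EXC_STACKFRAME = 112 + 96         = 208
 * both of which are quadword (16-byte) aligned.
 */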
69
70 /* BPF register usage */
71 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
72 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
73 #define ARENA_VM_START (MAX_BPF_JIT_REG + 2)
74
75 /* BPF to ppc register mappings */
76 void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
77 {
78 /* function return value */
79 ctx->b2p[BPF_REG_0] = _R8;
80 /* function arguments */
81 ctx->b2p[BPF_REG_1] = _R3;
82 ctx->b2p[BPF_REG_2] = _R4;
83 ctx->b2p[BPF_REG_3] = _R5;
84 ctx->b2p[BPF_REG_4] = _R6;
85 ctx->b2p[BPF_REG_5] = _R7;
86 /* non volatile registers */
87 ctx->b2p[BPF_REG_6] = _R27;
88 ctx->b2p[BPF_REG_7] = _R28;
89 ctx->b2p[BPF_REG_8] = _R29;
90 ctx->b2p[BPF_REG_9] = _R30;
91 /* frame pointer aka BPF_REG_10 */
92 ctx->b2p[BPF_REG_FP] = _R31;
93 /* eBPF jit internal registers */
94 ctx->b2p[BPF_REG_AX] = _R12;
95 ctx->b2p[TMP_REG_1] = _R9;
96 ctx->b2p[TMP_REG_2] = _R10;
97 /* non volatile register for kern_vm_start address */
98 ctx->b2p[ARENA_VM_START] = _R26;
99 }
100
101 /* PPC NVR range -- update this if we ever use NVRs below r26 */
102 #define BPF_PPC_NVR_MIN _R26
103
104 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
105 {
106 /*
107 * We only need a stack frame if:
108 * - we call other functions (kernel helpers), or
109 * - the bpf program uses its stack area
110 * The latter condition is deduced from the usage of BPF_REG_FP
111 *
112 * bpf_throw() leads to exception callback from a BPF (sub)program.
113 * The (sub)program is always marked as SEEN_FUNC, creating a stack
114 * frame. The exception callback uses the frame of the exception
115 * boundary, so the exception boundary program must have a frame.
116 */
117 return ctx->seen & SEEN_FUNC ||
118 bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
119 ctx->exception_cb ||
120 ctx->exception_boundary;
121 }
122
123 /*
124 * Stack layout with redzone:
125 * When not setting up our own stack frame, the redzone (288 bytes) usage is:
126 * Note: r1 from prev frame. Component offset negative wrt r1.
127 *
128 * [ prev sp ] <-------------
129 * [ ... ] |
130 * sp (r1) ---> [ stack pointer ] --------------
131 * [ tail_call_info ] 8
132 * [ nv gpr save area ] (6 * 8)
133 * [ addl. nv gpr save area] (12 * 8) <--- exception boundary/callback program
134 * [ local_tmp_var ] 24
135 * [ unused red zone ] 224
136 *
137 * Additional (12 * 8) in 'nv gpr save area' only in case of
138 * exception boundary/callback.
139 */
140 static int bpf_jit_stack_local(struct codegen_context *ctx)
141 {
142 if (bpf_has_stack_frame(ctx)) {
143 /* Stack layout with frame */
144 return STACK_FRAME_MIN_SIZE + ctx->stack_size;
145 } else {
146 /* Stack layout with redzone */
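		/*
		 * For example, without the exception NVR area and assuming a
		 * BPF_PPC_TAILCALL slot of 8 bytes: -(8 + 48 + 24) = -80,
		 * which is the redzone offset bpf_stf_barrier() below spills
		 * to (-80/-72).
		 */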
147 return -(BPF_PPC_TAILCALL
148 +BPF_PPC_STACK_SAVE
149 +(ctx->exception_boundary || ctx->exception_cb ?
150 BPF_PPC_EXC_STACK_SAVE : 0)
151 +BPF_PPC_STACK_LOCALS
152 );
153 }
154 }
155
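/*
 * Offset (from r1) of the tail_call_info slot: it sits just above the local
 * temp area and the BPF non-volatile register save area (see the stack
 * layouts above).
 */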
156 static int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
157 {
158 return bpf_jit_stack_local(ctx) + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE;
159 }
160
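/*
 * Offset (from r1) of the save slot for non-volatile register 'reg'.
 * The slots are packed immediately below tail_call_info; e.g. with a stack
 * frame, _R31 is saved at frame size + stack_size - BPF_PPC_TAILCALL - 8.
 */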
161 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
162 {
163 int min_valid_nvreg = BPF_PPC_NVR_MIN;
164 /* Default frame size for all cases except exception boundary/callback */
165 int frame_nvr_size = BPF_PPC_STACKFRAME;
166
167 /* Consider all nv regs for handling exceptions */
168 if (ctx->exception_boundary || ctx->exception_cb) {
169 min_valid_nvreg = _R14;
170 frame_nvr_size = BPF_PPC_EXC_STACKFRAME;
171 }
172
173 if (reg >= min_valid_nvreg && reg < 32)
174 return (bpf_has_stack_frame(ctx) ?
175 (frame_nvr_size + ctx->stack_size) : 0)
176 - (8 * (32 - reg)) - BPF_PPC_TAILCALL;
177
178 pr_err("BPF JIT is asking about unknown registers\n");
179 BUG();
180 }
181
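/*
 * Zero-fill the cookie slot(s) and the saved return value slot (offsets from
 * r1 supplied by the caller) before the fentry program runs.
 */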
182 void prepare_for_fsession_fentry(u32 *image, struct codegen_context *ctx, int cookie_cnt,
183 int cookie_off, int retval_off)
184 {
185 EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
186
187 for (int i = 0; i < cookie_cnt; i++)
188 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, cookie_off + 8 * i));
189 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, retval_off));
190 }
191
192 void store_func_meta(u32 *image, struct codegen_context *ctx,
193 u64 func_meta, int func_meta_off)
194 {
195 /*
196 * Store func_meta on the stack: [R1 + func_meta_off] = func_meta
197 *
198 * func_meta :
199 * bit[63]: is_return flag
200 * byte[1]: cookie offset from ctx
201 * byte[0]: args count
202 */
203 PPC_LI64(bpf_to_ppc(TMP_REG_1), func_meta);
204 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, func_meta_off));
205 }
206
207 void bpf_jit_realloc_regs(struct codegen_context *ctx)
208 {
209 }
210
211 static void emit_fp_priv_stack(u32 *image, struct codegen_context *ctx)
212 {
213 PPC_LI64(bpf_to_ppc(BPF_REG_FP), (__force long)ctx->priv_sp);
214 /*
215 * Load base percpu pointer of private stack allocation.
216 * Runtime per-cpu address = (base + data_offset) + (guard + stack_size)
217 */
218 #ifdef CONFIG_SMP
219 /* Load percpu data offset */
220 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R13,
221 offsetof(struct paca_struct, data_offset)));
222 EMIT(PPC_RAW_ADD(bpf_to_ppc(BPF_REG_FP),
223 bpf_to_ppc(TMP_REG_1), bpf_to_ppc(BPF_REG_FP)));
224 #endif
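	/*
	 * Point FP past the leading guard region and the (16-byte rounded)
	 * BPF stack size, i.e. at the top of this CPU's private stack area,
	 * so that BPF stack accesses at negative offsets from FP land inside
	 * the per-CPU allocation.
	 */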
225 EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), bpf_to_ppc(BPF_REG_FP),
226 PRIV_STACK_GUARD_SZ + round_up(ctx->priv_stack_size, 16)));
227 }
228
229 /*
230 * For exception boundary & exception_cb progs:
231 * return increased size to accommodate additional NVRs.
232 */
233 static int bpf_jit_stack_size(struct codegen_context *ctx)
234 {
235 return ctx->exception_boundary || ctx->exception_cb ?
236 BPF_PPC_EXC_STACKFRAME :
237 BPF_PPC_STACKFRAME;
238 }
239
240 void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
241 {
242 int i;
243
244 /* Instruction for trampoline attach */
245 EMIT(PPC_RAW_NOP());
246
247 #ifndef CONFIG_PPC_KERNEL_PCREL
248 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
249 EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
250 #endif
251
252 /*
253 * Tail call count (tcc) is saved & updated only in the main
254 * program's frame, and the address of tcc in the main program's
255 * frame (tcc_ptr) is saved in each subprog's frame.
256 *
257 * Offset of tail_call_info on any frame will be interpreted
258 * as either tcc_ptr or tcc value depending on whether it is
259 * greater than MAX_TAIL_CALL_CNT or not.
260 */
261 if (!ctx->is_subprog) {
262 EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
263 /* this goes in the redzone */
264 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
265 } else if (!ctx->exception_cb) {
266 /*
267 * Tailcall jitting for non exception_cb progs only.
268 * exception_cb won't require tail_call_info to be setup.
269 *
270 * tail_call_info interpretation logic:
271 *
272 * if tail_call_info <= MAX_TAIL_CALL_CNT
273 * caller is the main prog -> store a reference to its tail_call_info
274 * else
275 * caller is another subprog -> copy its existing reference as-is
276 */
277 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, 0));
278 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), -(BPF_PPC_TAILCALL)));
279 EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
280 PPC_BCC_CONST_SHORT(COND_GT, 8);
281 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2),
282 -(BPF_PPC_TAILCALL)));
283 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
284 }
285
286 if (bpf_has_stack_frame(ctx) && !ctx->exception_cb) {
287 /*
288 * We need a stack frame, but we don't necessarily need to
289 * save/restore LR unless we call other functions
290 */
291 if (ctx->seen & SEEN_FUNC) {
292 EMIT(PPC_RAW_MFLR(_R0));
293 EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
294 }
295
296 EMIT(PPC_RAW_STDU(_R1, _R1,
297 -(bpf_jit_stack_size(ctx) + ctx->stack_size)));
298 }
299
300 /*
301 * Program acting as exception boundary pushes R14..R25 in addition to
302 * the BPF callee-saved non-volatile registers. The exception callback
303 * uses the boundary program's stack frame; the additionally saved
304 * registers are recovered in the exception callback's epilogue.
305 */
306 if (ctx->exception_boundary) {
307 for (i = _R14; i <= _R25; i++)
308 EMIT(PPC_RAW_STD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
309 }
310
311 if (!ctx->exception_cb) {
312 /*
313 * Back up non-volatile regs -- BPF registers 6-10
314 * If we haven't created our own stack frame, we save these
315 * in the protected zone below the previous stack frame
316 */
317 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
318 if (ctx->exception_boundary || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
319 EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1,
320 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
321
322 if (ctx->exception_boundary || ctx->arena_vm_start)
323 EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
324 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
325 } else {
326 /*
327 * The exception callback receives the frame pointer of the boundary
328 * program (main prog) as its third argument.
329 */
330 EMIT(PPC_RAW_MR(_R1, _R5));
331 /*
332 * Exception callback reuses the stack frame of exception boundary.
333 * But the BPF stack depths of the exception callback and the exception
334 * boundary need not be the same. If they differ, adjust the stack
335 * frame size to account for the exception callback's BPF stack depth.
336 * The non-volatile register save area remains unchanged. These non-
337 * volatile registers are restored in exception callback's epilogue.
338 */
339 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R5, 0));
340 EMIT(PPC_RAW_SUB(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_1), _R1));
341 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
342 -BPF_PPC_EXC_STACKFRAME));
343 EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), ctx->stack_size));
344 PPC_BCC_CONST_SHORT(COND_EQ, 12);
345 EMIT(PPC_RAW_MR(_R1, bpf_to_ppc(TMP_REG_1)));
346 EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_EXC_STACKFRAME + ctx->stack_size)));
347 }
348
349 /*
350 * The exception callback is not restricted from using the stack area or arena.
351 * Set up the frame pointer to point to the bpf stack area.
352 */
353 if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
354 if (ctx->priv_sp) {
355 /* Set up fp in private stack */
356 emit_fp_priv_stack(image, ctx);
357 } else {
358 /* Setup frame pointer to point to the bpf stack area */
359 EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
360 STACK_FRAME_MIN_SIZE + ctx->stack_size));
361 }
362 }
363
364 if (ctx->arena_vm_start)
365 PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
366 }
367
368 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
369 {
370 int i;
371
372 /* Restore NVRs */
373 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
374 if (ctx->exception_cb || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
375 EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
376
377 if (ctx->exception_cb || ctx->arena_vm_start)
378 EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
379 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
380
381 if (ctx->exception_cb) {
382 /*
383 * Recover additionally saved non volatile registers from stack
384 * frame of exception boundary program.
385 */
386 for (i = _R14; i <= _R25; i++)
387 EMIT(PPC_RAW_LD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
388 }
389
390 /* Tear down our stack frame */
391 if (bpf_has_stack_frame(ctx)) {
392 EMIT(PPC_RAW_ADDI(_R1, _R1, bpf_jit_stack_size(ctx) + ctx->stack_size));
393
394 if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
395 EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
396 EMIT(PPC_RAW_MTLR(_R0));
397 }
398 }
399 }
400
401 void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
402 {
403 bpf_jit_emit_common_epilogue(image, ctx);
404
405 /* Move result to r3 */
406 EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
407
408 EMIT(PPC_RAW_BLR());
409
410 bpf_jit_build_fentry_stubs(image, ctx);
411 }
412
413 /*
414 * arch_bpf_stack_walk() - BPF stack walker for PowerPC
415 *
416 * Based on arch_stack_walk() from stacktrace.c.
417 * PowerPC has no dedicated frame pointer; frames are chained through the
418 * back pointer stored at the stack pointer (r1). See [1] for the
419 * equivalence between frame pointers and stack pointers, and [2] for more.
420 * TODO: refactor with arch_stack_walk()
421 *
422 * [1]: https://lore.kernel.org/all/20200220115141.2707-1-mpe@ellerman.id.au/
423 * [2]: https://lore.kernel.org/bpf/20260122211854.5508-5-adubey@linux.ibm.com/
424 */
425
426 void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64), void *cookie)
427 {
428 // callback processing always in current context
429 unsigned long sp = current_stack_frame();
430
431 for (;;) {
432 unsigned long *stack = (unsigned long *) sp;
433 unsigned long ip;
434
435 if (!validate_sp(sp, current))
436 return;
437
438 ip = stack[STACK_FRAME_LR_SAVE];
439 if (!ip)
440 break;
441
442 /*
443 * The common consume_fn code expects the stack pointer as the third
444 * argument and the frame pointer as the fourth. ppc64 has no separate
445 * frame pointer, so pass r1 (named sp here) for both.
446 */
447 if (ip && !consume_fn(cookie, ip, sp, sp))
448 break;
449
450 sp = stack[0];
451 }
452 }
453
454 int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
455 {
456 unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
457 long reladdr;
458
459 /* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */
460 if (!func) {
461 for (int i = 0; i < 5; i++)
462 EMIT(PPC_RAW_NOP());
463 /* elfv1 needs an additional instruction to load addr from descriptor */
464 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
465 EMIT(PPC_RAW_NOP());
466 EMIT(PPC_RAW_MTCTR(_R12));
467 EMIT(PPC_RAW_BCTRL());
468 return 0;
469 }
470
471 #ifdef CONFIG_PPC_KERNEL_PCREL
472 reladdr = func_addr - local_paca->kernelbase;
473
474 /*
475 * If fimage is NULL (the initial pass to find image size),
476 * account for the maximum no. of instructions possible.
477 */
478 if (!fimage) {
479 ctx->idx += 7;
480 return 0;
481 } else if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
482 EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
483 /* Align for subsequent prefix instruction */
484 if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
485 EMIT(PPC_RAW_NOP());
486 /* paddi r12,r12,addr */
487 EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
488 EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
489 } else {
490 unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
491 bool alignment_needed = !IS_ALIGNED(pc, 8);
492
493 reladdr = func_addr - (alignment_needed ? pc + 4 : pc);
494
495 if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
496 if (alignment_needed)
497 EMIT(PPC_RAW_NOP());
498 /* pla r12,addr */
499 EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
500 EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
501 } else {
502 /* We can clobber r12 */
503 PPC_LI64(_R12, func);
504 }
505 }
506 EMIT(PPC_RAW_MTCTR(_R12));
507 EMIT(PPC_RAW_BCTRL());
508 #else
509 if (core_kernel_text(func_addr)) {
510 reladdr = func_addr - kernel_toc_addr();
511 if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
512 pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
513 return -ERANGE;
514 }
515
516 EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
517 EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
518 EMIT(PPC_RAW_MTCTR(_R12));
519 EMIT(PPC_RAW_BCTRL());
520 } else {
521 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
522 /* func points to the function descriptor */
523 PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
524 /* Load actual entry point from function descriptor */
525 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
526 /* ... and move it to CTR */
527 EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
528 /*
529 * Load TOC from function descriptor at offset 8.
530 * We can clobber r2 since we get called through a
531 * function pointer (so caller will save/restore r2).
532 */
533 if (is_module_text_address(func_addr))
534 EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
535 } else {
536 PPC_LI64(_R12, func);
537 EMIT(PPC_RAW_MTCTR(_R12));
538 }
539 EMIT(PPC_RAW_BCTRL());
540 /*
541 * Load r2 with kernel TOC as kernel TOC is used if function address falls
542 * within core kernel text.
543 */
544 if (is_module_text_address(func_addr))
545 EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
546 }
547 #endif
548
549 return 0;
550 }
551
552 static int zero_extend(u32 *image, struct codegen_context *ctx, u32 src_reg, u32 dst_reg, u32 size)
553 {
554 switch (size) {
555 case 1:
556 /* zero-extend 8 bits into 64 bits */
557 EMIT(PPC_RAW_RLDICL(dst_reg, src_reg, 0, 56));
558 return 0;
559 case 2:
560 /* zero-extend 16 bits into 64 bits */
561 EMIT(PPC_RAW_RLDICL(dst_reg, src_reg, 0, 48));
562 return 0;
563 case 4:
564 /* zero-extend 32 bits into 64 bits */
565 EMIT(PPC_RAW_RLDICL(dst_reg, src_reg, 0, 32));
566 fallthrough;
567 case 8:
568 /* Nothing to do */
569 return 0;
570 default:
571 return -1;
572 }
573 }
574
575 static int sign_extend(u32 *image, struct codegen_context *ctx, u32 src_reg, u32 dst_reg, u32 size)
576 {
577 switch (size) {
578 case 1:
579 /* sign-extend 8 bits into 64 bits */
580 EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
581 return 0;
582 case 2:
583 /* sign-extend 16 bits into 64 bits */
584 EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
585 return 0;
586 case 4:
587 /* sign-extend 32 bits into 64 bits */
588 EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
589 fallthrough;
590 case 8:
591 /* Nothing to do */
592 return 0;
593 default:
594 return -1;
595 }
596 }
597
598 /*
599 * Handle powerpc ABI expectations from caller:
600 * - Unsigned arguments are zero-extended.
601 * - Signed arguments are sign-extended.
602 */
603 static int prepare_for_kfunc_call(const struct bpf_prog *fp, u32 *image,
604 struct codegen_context *ctx,
605 const struct bpf_insn *insn)
606 {
607 const struct btf_func_model *m = bpf_jit_find_kfunc_model(fp, insn);
608 int i;
609
610 if (!m)
611 return -1;
612
613 for (i = 0; i < m->nr_args; i++) {
614 /* Note that BPF ABI only allows up to 5 args for kfuncs */
615 u32 reg = bpf_to_ppc(BPF_REG_1 + i), size = m->arg_size[i];
616
617 if (!(m->arg_flags[i] & BTF_FMODEL_SIGNED_ARG)) {
618 if (zero_extend(image, ctx, reg, reg, size))
619 return -1;
620 } else {
621 if (sign_extend(image, ctx, reg, reg, size))
622 return -1;
623 }
624 }
625
626 return 0;
627 }
628
629 static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
630 {
631 /*
632 * By now, the eBPF program has already setup parameters in r3, r4 and r5
633 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
634 * r4/BPF_REG_2 - pointer to bpf_array
635 * r5/BPF_REG_3 - index in bpf_array
636 */
637 int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
638 int b2p_index = bpf_to_ppc(BPF_REG_3);
639 int bpf_tailcall_prologue_size = 12;
640
641 if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
642 bpf_tailcall_prologue_size += 4; /* skip past the toc load */
643
644 /*
645 * if (index >= array->map.max_entries)
646 * goto out;
647 */
648 EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
649 EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
650 EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
651 PPC_BCC_SHORT(COND_GE, out);
652
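	/*
	 * tail_call_info holds either the tcc itself (main program's frame)
	 * or a pointer to the main frame's tcc (subprog frames); a value
	 * above MAX_TAIL_CALL_CNT is treated as a pointer, so normalize
	 * TMP_REG_1 to the actual count before checking the limit.
	 */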
653 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
654 EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
655 PPC_BCC_CONST_SHORT(COND_LE, 8);
656
657 /* dereference TMP_REG_1 */
658 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 0));
659
660 /*
661 * if (tail_call_info == MAX_TAIL_CALL_CNT)
662 * goto out;
663 */
664 EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
665 PPC_BCC_SHORT(COND_EQ, out);
666
667 /*
668 * tail_call_info++; <- Actual value of tcc here
669 * Writeback this updated value only if tailcall succeeds.
670 */
671 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
672
673 /* prog = array->ptrs[index]; */
674 EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_2), b2p_index, 8));
675 EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), b2p_bpf_array));
676 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
677 offsetof(struct bpf_array, ptrs)));
678
679 /*
680 * if (prog == NULL)
681 * goto out;
682 */
683 EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), 0));
684 PPC_BCC_SHORT(COND_EQ, out);
685
686 /* goto *(prog->bpf_func + prologue_size); */
687 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
688 offsetof(struct bpf_prog, bpf_func)));
689 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
690 FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
691 EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_2)));
692
693 /*
694 * Before writing updated tail_call_info, distinguish if current frame
695 * is storing a reference to tail_call_info or actual tcc value in
696 * tail_call_info.
697 */
698 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
699 EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_2), MAX_TAIL_CALL_CNT));
700 PPC_BCC_CONST_SHORT(COND_GT, 8);
701
702 /* First get address of tail_call_info */
703 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
704 /* Writeback updated value to tail_call_info */
705 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
706
707 /* tear down stack, restore NVRs, ... */
708 bpf_jit_emit_common_epilogue(image, ctx);
709
710 EMIT(PPC_RAW_BCTR());
711
712 /* out: */
713 return 0;
714 }
715
716 bool bpf_jit_bypass_spec_v1(void)
717 {
718 #if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
719 return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
720 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR));
721 #else
722 return true;
723 #endif
724 }
725
726 bool bpf_jit_bypass_spec_v4(void)
727 {
728 return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
729 security_ftr_enabled(SEC_FTR_STF_BARRIER) &&
730 stf_barrier_type_get() != STF_BARRIER_NONE);
731 }
732
733 /*
734 * We spill into the redzone always, even if the bpf program has its own stack frame.
735 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
736 */
737 void bpf_stf_barrier(void);
738
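/*
 * Fallback barrier for STF_BARRIER_FALLBACK: spill r21/r22 into the redzone,
 * sync, reload them, then the 'ori 31,31,0' speculation barrier followed by
 * a run of taken branches.
 */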
739 asm (
740 " .global bpf_stf_barrier ;"
741 " bpf_stf_barrier: ;"
742 " std 21,-80(1) ;"
743 " std 22,-72(1) ;"
744 " sync ;"
745 " ld 21,-80(1) ;"
746 " ld 22,-72(1) ;"
747 " ori 31,31,0 ;"
748 " .rept 14 ;"
749 " b 1f ;"
750 " 1: ;"
751 " .endr ;"
752 " blr ;"
753 );
754
755 static int bpf_jit_emit_atomic_ops(u32 *image, struct codegen_context *ctx,
756 const struct bpf_insn *insn, u32 *jmp_off,
757 u32 *tmp_idx, u32 *addrp)
758 {
759 u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
760 u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
761 u32 size = BPF_SIZE(insn->code);
762 u32 src_reg = bpf_to_ppc(insn->src_reg);
763 u32 dst_reg = bpf_to_ppc(insn->dst_reg);
764 s32 imm = insn->imm;
765
766 u32 save_reg = tmp2_reg;
767 u32 ret_reg = src_reg;
768 u32 fixup_idx;
769
770 /* Get offset into TMP_REG_1 */
771 EMIT(PPC_RAW_LI(tmp1_reg, insn->off));
772 /*
773 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
774 * before and after the operation.
775 *
776 * This is a requirement in the Linux Kernel Memory Model.
777 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
778 */
779 if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
780 EMIT(PPC_RAW_SYNC());
781
782 *tmp_idx = ctx->idx;
783
784 /* load value from memory into TMP_REG_2 */
785 if (size == BPF_DW)
786 EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
787 else
788 EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
789 /* Save old value in _R0 */
790 if (imm & BPF_FETCH)
791 EMIT(PPC_RAW_MR(_R0, tmp2_reg));
792
793 switch (imm) {
794 case BPF_ADD:
795 case BPF_ADD | BPF_FETCH:
796 EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
797 break;
798 case BPF_AND:
799 case BPF_AND | BPF_FETCH:
800 EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
801 break;
802 case BPF_OR:
803 case BPF_OR | BPF_FETCH:
804 EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
805 break;
806 case BPF_XOR:
807 case BPF_XOR | BPF_FETCH:
808 EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
809 break;
810 case BPF_CMPXCHG:
811 /*
812 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
813 * in src_reg for other cases.
814 */
815 ret_reg = bpf_to_ppc(BPF_REG_0);
816
817 /* Compare with old value in BPF_R0 */
818 if (size == BPF_DW)
819 EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
820 else
821 EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
822 /* Don't set if different from old value */
823 PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
824 fallthrough;
825 case BPF_XCHG:
826 save_reg = src_reg;
827 break;
828 default:
829 return -EOPNOTSUPP;
830 }
831
832 /* store new value */
833 if (size == BPF_DW)
834 EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
835 else
836 EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
837 /* we're done if this succeeded */
838 PPC_BCC_SHORT(COND_NE, *tmp_idx * 4);
839 fixup_idx = ctx->idx;
840
841 if (imm & BPF_FETCH) {
842 /* Emit 'sync' to enforce full ordering */
843 if (IS_ENABLED(CONFIG_SMP))
844 EMIT(PPC_RAW_SYNC());
845 EMIT(PPC_RAW_MR(ret_reg, _R0));
846 /*
847 * Skip unnecessary zero-extension for 32-bit cmpxchg.
848 * For context, see commit 39491867ace5.
849 */
850 if (size != BPF_DW && imm == BPF_CMPXCHG &&
851 insn_is_zext(insn + 1))
852 *addrp = ctx->idx * 4;
853 }
854
855 *jmp_off = (fixup_idx - *tmp_idx) * 4;
856
857 return 0;
858 }
859
860 static int bpf_jit_emit_probe_mem_store(struct codegen_context *ctx, u32 src_reg, s16 off,
861 u32 code, u32 *image)
862 {
863 u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
864 u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
865
866 switch (BPF_SIZE(code)) {
867 case BPF_B:
868 EMIT(PPC_RAW_STB(src_reg, tmp1_reg, off));
869 break;
870 case BPF_H:
871 EMIT(PPC_RAW_STH(src_reg, tmp1_reg, off));
872 break;
873 case BPF_W:
874 EMIT(PPC_RAW_STW(src_reg, tmp1_reg, off));
875 break;
876 case BPF_DW:
877 if (off % 4) {
878 EMIT(PPC_RAW_LI(tmp2_reg, off));
879 EMIT(PPC_RAW_STDX(src_reg, tmp1_reg, tmp2_reg));
880 } else {
881 EMIT(PPC_RAW_STD(src_reg, tmp1_reg, off));
882 }
883 break;
884 default:
885 return -EINVAL;
886 }
887 return 0;
888 }
889
890 static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image)
891 {
892 u32 code = insn.code;
893 u32 dst_reg = bpf_to_ppc(insn.dst_reg);
894 u32 src_reg = bpf_to_ppc(insn.src_reg);
895 u32 size = BPF_SIZE(code);
896 u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
897 u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
898 s16 off = insn.off;
899 s32 imm = insn.imm;
900
901 switch (imm) {
902 case BPF_LOAD_ACQ:
903 switch (size) {
904 case BPF_B:
905 EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
906 break;
907 case BPF_H:
908 EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
909 break;
910 case BPF_W:
911 EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
912 break;
913 case BPF_DW:
914 if (off % 4) {
915 EMIT(PPC_RAW_LI(tmp1_reg, off));
916 EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
917 } else {
918 EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
919 }
920 break;
921 }
922 EMIT(PPC_RAW_LWSYNC());
923 break;
924 case BPF_STORE_REL:
925 EMIT(PPC_RAW_LWSYNC());
926 switch (size) {
927 case BPF_B:
928 EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
929 break;
930 case BPF_H:
931 EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
932 break;
933 case BPF_W:
934 EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
935 break;
936 case BPF_DW:
937 if (off % 4) {
938 EMIT(PPC_RAW_LI(tmp2_reg, off));
939 EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
940 } else {
941 EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
942 }
943 break;
944 }
945 break;
946 default:
947 pr_err_ratelimited("unexpected atomic load/store op code %02x\n",
948 imm);
949 return -EINVAL;
950 }
951
952 return 0;
953 }
954
955 /* Assemble the body code between the prologue & epilogue */
956 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
957 u32 *addrs, int pass, bool extra_pass)
958 {
959 enum stf_barrier_type stf_barrier = stf_barrier_type_get();
960 bool sync_emitted, ori31_emitted;
961 const struct bpf_insn *insn = fp->insnsi;
962 int flen = fp->len;
963 int i, ret;
964
965 /* Start of epilogue code - will only be valid 2nd pass onwards */
966 u32 exit_addr = addrs[flen];
967
968 for (i = 0; i < flen; i++) {
969 u32 code = insn[i].code;
970 u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
971 u32 src_reg = bpf_to_ppc(insn[i].src_reg);
972 u32 size = BPF_SIZE(code);
973 u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
974 u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
975 s16 off = insn[i].off;
976 s32 imm = insn[i].imm;
977 bool func_addr_fixed;
978 u64 func_addr;
979 u64 imm64;
980 u32 true_cond;
981 u32 tmp_idx;
982 u32 jmp_off;
983
984 /*
985 * addrs[] maps a BPF bytecode address into a real offset from
986 * the start of the body code.
987 */
988 addrs[i] = ctx->idx * 4;
989
990 /*
991 * As an optimization, we note down which non-volatile registers
992 * are used so that we can only save/restore those in our
993 * prologue and epilogue. We do this here regardless of whether
994 * the actual BPF instruction uses src/dst registers or not
995 * (for instance, BPF_CALL does not use them). The expectation
996 * is that those instructions will have src_reg/dst_reg set to
997 * 0. Even otherwise, we just lose some prologue/epilogue
998 * optimization but everything else should work without
999 * any issues.
1000 */
1001 if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
1002 bpf_set_seen_register(ctx, dst_reg);
1003 if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
1004 bpf_set_seen_register(ctx, src_reg);
1005
1006 switch (code) {
1007 /*
1008 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
1009 */
1010 case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
1011 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
1012 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
1013 goto bpf_alu32_trunc;
1014 case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
1015 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
1016 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
1017 goto bpf_alu32_trunc;
1018 case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
1019 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
1020 if (!imm) {
1021 goto bpf_alu32_trunc;
1022 } else if (imm >= -32768 && imm < 32768) {
1023 EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
1024 } else {
1025 PPC_LI32(tmp1_reg, imm);
1026 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
1027 }
1028 goto bpf_alu32_trunc;
1029 case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
1030 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
1031 if (!imm) {
1032 goto bpf_alu32_trunc;
1033 } else if (imm > -32768 && imm <= 32768) {
1034 EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
1035 } else {
1036 PPC_LI32(tmp1_reg, imm);
1037 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
1038 }
1039 goto bpf_alu32_trunc;
1040 case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
1041 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
1042 if (BPF_CLASS(code) == BPF_ALU)
1043 EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
1044 else
1045 EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
1046 goto bpf_alu32_trunc;
1047 case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
1048 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
1049 if (imm >= -32768 && imm < 32768)
1050 EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
1051 else {
1052 PPC_LI32(tmp1_reg, imm);
1053 if (BPF_CLASS(code) == BPF_ALU)
1054 EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
1055 else
1056 EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
1057 }
1058 goto bpf_alu32_trunc;
1059 case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
1060 case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
1061 if (BPF_OP(code) == BPF_MOD) {
1062 if (off)
1063 EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg));
1064 else
1065 EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
1066
1067 EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
1068 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
1069 } else
1070 if (off)
1071 EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg));
1072 else
1073 EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
1074 goto bpf_alu32_trunc;
1075 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
1076 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
1077 if (BPF_OP(code) == BPF_MOD) {
1078 if (off)
1079 EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg));
1080 else
1081 EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
1082 EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
1083 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
1084 } else
1085 if (off)
1086 EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg));
1087 else
1088 EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
1089 break;
1090 case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
1091 case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
1092 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
1093 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
1094 if (imm == 0)
1095 return -EINVAL;
1096 if (imm == 1) {
1097 if (BPF_OP(code) == BPF_DIV) {
1098 goto bpf_alu32_trunc;
1099 } else {
1100 EMIT(PPC_RAW_LI(dst_reg, 0));
1101 break;
1102 }
1103 }
1104
1105 PPC_LI32(tmp1_reg, imm);
1106 switch (BPF_CLASS(code)) {
1107 case BPF_ALU:
1108 if (BPF_OP(code) == BPF_MOD) {
1109 if (off)
1110 EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg));
1111 else
1112 EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
1113 EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
1114 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
1115 } else
1116 if (off)
1117 EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg));
1118 else
1119 EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
1120 break;
1121 case BPF_ALU64:
1122 if (BPF_OP(code) == BPF_MOD) {
1123 if (off)
1124 EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg));
1125 else
1126 EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
1127 EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
1128 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
1129 } else
1130 if (off)
1131 EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg));
1132 else
1133 EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
1134 break;
1135 }
1136 goto bpf_alu32_trunc;
1137 case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
1138 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
1139 EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
1140 goto bpf_alu32_trunc;
1141
1142 /*
1143 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
1144 */
1145 case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
1146 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
1147 EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
1148 goto bpf_alu32_trunc;
1149 case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
1150 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
1151 if (!IMM_H(imm))
1152 EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
1153 else {
1154 /* Sign-extended */
1155 PPC_LI32(tmp1_reg, imm);
1156 EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
1157 }
1158 goto bpf_alu32_trunc;
1159 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
1160 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
1161 EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
1162 goto bpf_alu32_trunc;
1163 case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
1164 case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
1165 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
1166 /* Sign-extended */
1167 PPC_LI32(tmp1_reg, imm);
1168 EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
1169 } else {
1170 if (IMM_L(imm))
1171 EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
1172 if (IMM_H(imm))
1173 EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
1174 }
1175 goto bpf_alu32_trunc;
1176 case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
1177 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
1178 EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
1179 goto bpf_alu32_trunc;
1180 case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
1181 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
1182 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
1183 /* Sign-extended */
1184 PPC_LI32(tmp1_reg, imm);
1185 EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
1186 } else {
1187 if (IMM_L(imm))
1188 EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
1189 if (IMM_H(imm))
1190 EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
1191 }
1192 goto bpf_alu32_trunc;
1193 case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
1194 /* slw clears top 32 bits */
1195 EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
1196 /* skip zero extension move, but set address map. */
1197 if (insn_is_zext(&insn[i + 1]))
1198 addrs[++i] = ctx->idx * 4;
1199 break;
1200 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
1201 EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
1202 break;
1203 case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
1204 /* with imm 0, we still need to clear top 32 bits */
1205 EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
1206 if (insn_is_zext(&insn[i + 1]))
1207 addrs[++i] = ctx->idx * 4;
1208 break;
1209 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
1210 if (imm != 0)
1211 EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
1212 break;
1213 case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
1214 EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
1215 if (insn_is_zext(&insn[i + 1]))
1216 addrs[++i] = ctx->idx * 4;
1217 break;
1218 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
1219 EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
1220 break;
1221 case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
1222 EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
1223 if (insn_is_zext(&insn[i + 1]))
1224 addrs[++i] = ctx->idx * 4;
1225 break;
1226 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
1227 if (imm != 0)
1228 EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
1229 break;
1230 case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
1231 EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
1232 goto bpf_alu32_trunc;
1233 case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
1234 EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
1235 break;
1236 case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
1237 EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
1238 goto bpf_alu32_trunc;
1239 case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
1240 if (imm != 0)
1241 EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
1242 break;
1243
1244 /*
1245 * MOV
1246 */
1247 case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
1248 case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
1249
1250 if (insn_is_mov_percpu_addr(&insn[i])) {
1251 if (IS_ENABLED(CONFIG_SMP)) {
1252 EMIT(PPC_RAW_LD(tmp1_reg, _R13, offsetof(struct paca_struct, data_offset)));
1253 EMIT(PPC_RAW_ADD(dst_reg, src_reg, tmp1_reg));
1254 } else if (src_reg != dst_reg) {
1255 EMIT(PPC_RAW_MR(dst_reg, src_reg));
1256 }
1257 break;
1258 }
1259
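			/*
			 * addr_space_cast to a user address: a NULL arena
			 * pointer stays NULL, otherwise combine the upper 32
			 * bits of user_vm_start with the lower 32 bits of src.
			 */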
1260 if (insn_is_cast_user(&insn[i])) {
1261 EMIT(PPC_RAW_RLDICL_DOT(tmp1_reg, src_reg, 0, 32));
1262 PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL));
1263 PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4);
1264 EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg));
1265 EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
1266 break;
1267 }
1268
1269 if (imm == 1) {
1270 /* special mov32 for zext */
1271 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
1272 break;
1273 }
1274 if (off == 0) {
1275 /* MOV */
1276 if (dst_reg != src_reg)
1277 EMIT(PPC_RAW_MR(dst_reg, src_reg));
1278 } else {
1279 /* MOVSX: dst = (s8,s16,s32)src (off = 8,16,32) */
1280 if (sign_extend(image, ctx, src_reg, dst_reg, off / 8))
1281 return -1;
1282 }
1283 goto bpf_alu32_trunc;
1284 case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
1285 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
1286 PPC_LI32(dst_reg, imm);
1287 if (imm < 0)
1288 goto bpf_alu32_trunc;
1289 else if (insn_is_zext(&insn[i + 1]))
1290 addrs[++i] = ctx->idx * 4;
1291 break;
1292
1293 bpf_alu32_trunc:
1294 /* Truncate to 32-bits */
1295 if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
1296 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
1297 break;
1298
1299 /*
1300 * BPF_FROM_BE/LE
1301 */
1302 case BPF_ALU | BPF_END | BPF_FROM_LE:
1303 case BPF_ALU | BPF_END | BPF_FROM_BE:
1304 case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1305 #ifdef __BIG_ENDIAN__
1306 if (BPF_SRC(code) == BPF_FROM_BE)
1307 goto emit_clear;
1308 #else /* !__BIG_ENDIAN__ */
1309 if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
1310 goto emit_clear;
1311 #endif
1312 switch (imm) {
1313 case 16:
1314 /* Rotate 8 bits left & mask with 0x0000ff00 */
1315 EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
1316 /* Rotate 8 bits right & insert LSB to reg */
1317 EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
1318 /* Move result back to dst_reg */
1319 EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
1320 break;
1321 case 32:
1322 /*
1323 * Rotate word left by 8 bits:
1324 * 2 bytes are already in their final position
1325 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
1326 */
1327 EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
1328 /* Rotate 24 bits and insert byte 1 */
1329 EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
1330 /* Rotate 24 bits and insert byte 3 */
1331 EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
1332 EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
1333 break;
1334 case 64:
1335 /* Store the value to stack and then use byte-reverse loads */
1336 EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
1337 EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
1338 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1339 EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
1340 } else {
1341 EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
1342 if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
1343 EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
1344 EMIT(PPC_RAW_LI(tmp2_reg, 4));
1345 EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
1346 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1347 EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
1348 EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
1349 }
1350 break;
1351 }
1352 break;
1353
1354 emit_clear:
1355 switch (imm) {
1356 case 16:
1357 /* zero-extend 16 bits into 64 bits */
1358 EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
1359 if (insn_is_zext(&insn[i + 1]))
1360 addrs[++i] = ctx->idx * 4;
1361 break;
1362 case 32:
1363 if (!fp->aux->verifier_zext)
1364 /* zero-extend 32 bits into 64 bits */
1365 EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
1366 break;
1367 case 64:
1368 /* nop */
1369 break;
1370 }
1371 break;
1372
1373 /*
1374 * BPF_ST NOSPEC (speculation barrier)
1375 *
1376 * The following must act as a barrier against both Spectre v1
1377 * and v4 if we requested both mitigations. Therefore, also emit
1378 * 'isync; sync' on E500 or 'ori31' on BOOK3S_64 in addition to
1379 * the insns needed for a Spectre v4 barrier.
1380 *
1381 * If we requested only !bypass_spec_v1 OR only !bypass_spec_v4,
1382 * we can skip the respective other barrier type as an
1383 * optimization.
1384 */
1385 case BPF_ST | BPF_NOSPEC:
1386 sync_emitted = false;
1387 ori31_emitted = false;
1388 if (IS_ENABLED(CONFIG_PPC_E500) &&
1389 !bpf_jit_bypass_spec_v1()) {
1390 EMIT(PPC_RAW_ISYNC());
1391 EMIT(PPC_RAW_SYNC());
1392 sync_emitted = true;
1393 }
1394 if (!bpf_jit_bypass_spec_v4()) {
1395 switch (stf_barrier) {
1396 case STF_BARRIER_EIEIO:
1397 EMIT(PPC_RAW_EIEIO() | 0x02000000);
1398 break;
1399 case STF_BARRIER_SYNC_ORI:
1400 if (!sync_emitted)
1401 EMIT(PPC_RAW_SYNC());
1402 EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
1403 EMIT(PPC_RAW_ORI(_R31, _R31, 0));
1404 ori31_emitted = true;
1405 break;
1406 case STF_BARRIER_FALLBACK:
1407 ctx->seen |= SEEN_FUNC;
1408 PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
1409 EMIT(PPC_RAW_MTCTR(_R12));
1410 EMIT(PPC_RAW_BCTRL());
1411 break;
1412 case STF_BARRIER_NONE:
1413 break;
1414 }
1415 }
1416 if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1417 !bpf_jit_bypass_spec_v1() &&
1418 !ori31_emitted)
1419 EMIT(PPC_RAW_ORI(_R31, _R31, 0));
1420 break;
1421
1422 /*
1423 * BPF_ST(X)
1424 */
1425 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
1426 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1427 if (BPF_CLASS(code) == BPF_ST) {
1428 EMIT(PPC_RAW_LI(tmp1_reg, imm));
1429 src_reg = tmp1_reg;
1430 }
1431 EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
1432 break;
1433 case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
1434 case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
1435 if (BPF_CLASS(code) == BPF_ST) {
1436 EMIT(PPC_RAW_LI(tmp1_reg, imm));
1437 src_reg = tmp1_reg;
1438 }
1439 EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
1440 break;
1441 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1442 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1443 if (BPF_CLASS(code) == BPF_ST) {
1444 PPC_LI32(tmp1_reg, imm);
1445 src_reg = tmp1_reg;
1446 }
1447 EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
1448 break;
1449 case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
1450 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1451 if (BPF_CLASS(code) == BPF_ST) {
1452 PPC_LI32(tmp1_reg, imm);
1453 src_reg = tmp1_reg;
1454 }
1455 if (off % 4) {
1456 EMIT(PPC_RAW_LI(tmp2_reg, off));
1457 EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
1458 } else {
1459 EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
1460 }
1461 break;
1462
1463 case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1464 case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1465 case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1466 case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1467
1468 EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1469
1470 ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image);
1471 if (ret)
1472 return ret;
1473
1474 ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1475 ctx->idx - 1, 4, -1, code);
1476 if (ret)
1477 return ret;
1478
1479 break;
1480
1481 case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1482 case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1483 case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1484 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1485
1486 EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1487
1488 if (BPF_SIZE(code) == BPF_W || BPF_SIZE(code) == BPF_DW) {
1489 PPC_LI32(tmp2_reg, imm);
1490 src_reg = tmp2_reg;
1491 } else {
1492 EMIT(PPC_RAW_LI(tmp2_reg, imm));
1493 src_reg = tmp2_reg;
1494 }
1495
1496 ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image);
1497 if (ret)
1498 return ret;
1499
1500 ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1501 ctx->idx - 1, 4, -1, code);
1502 if (ret)
1503 return ret;
1504
1505 break;
1506
1507 /*
1508 * BPF_STX PROBE_ATOMIC (arena atomic ops)
1509 */
1510 case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
1511 case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
1512 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1513 ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
1514 &jmp_off, &tmp_idx, &addrs[i + 1]);
1515 if (ret) {
1516 if (ret == -EOPNOTSUPP) {
1517 pr_err_ratelimited(
1518 "eBPF filter atomic op code %02x (@%d) unsupported\n",
1519 code, i);
1520 }
1521 return ret;
1522 }
1523 /* LDARX/LWARX should land here on exception. */
1524 ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1525 tmp_idx, jmp_off, dst_reg, code);
1526 if (ret)
1527 return ret;
1528
1529 /* Restore dst_reg: undo the ARENA_VM_START offset added above */
1530 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1531 break;
1532
1533 /*
1534 * BPF_STX ATOMIC (atomic ops)
1535 */
1536 case BPF_STX | BPF_ATOMIC | BPF_B:
1537 case BPF_STX | BPF_ATOMIC | BPF_H:
1538 case BPF_STX | BPF_ATOMIC | BPF_W:
1539 case BPF_STX | BPF_ATOMIC | BPF_DW:
1540 if (bpf_atomic_is_load_store(&insn[i])) {
1541 ret = emit_atomic_ld_st(insn[i], ctx, image);
1542 if (ret)
1543 return ret;
1544
1545 if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1546 addrs[++i] = ctx->idx * 4;
1547 break;
1548 } else if (size == BPF_B || size == BPF_H) {
1549 pr_err_ratelimited(
1550 "eBPF filter atomic op code %02x (@%d) unsupported\n",
1551 code, i);
1552 return -EOPNOTSUPP;
1553 }
1554
1555 ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
1556 &jmp_off, &tmp_idx, &addrs[i + 1]);
1557 if (ret) {
1558 if (ret == -EOPNOTSUPP) {
1559 pr_err_ratelimited(
1560 "eBPF filter atomic op code %02x (@%d) unsupported\n",
1561 code, i);
1562 }
1563 return ret;
1564 }
1565 break;
1566
1567 /*
1568 * BPF_LDX
1569 */
1570 /* dst = *(u8 *)(ul) (src + off) */
1571 case BPF_LDX | BPF_MEM | BPF_B:
1572 case BPF_LDX | BPF_MEMSX | BPF_B:
1573 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1574 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1575 /* dst = *(u16 *)(ul) (src + off) */
1576 case BPF_LDX | BPF_MEM | BPF_H:
1577 case BPF_LDX | BPF_MEMSX | BPF_H:
1578 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1579 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1580 /* dst = *(u32 *)(ul) (src + off) */
1581 case BPF_LDX | BPF_MEM | BPF_W:
1582 case BPF_LDX | BPF_MEMSX | BPF_W:
1583 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1584 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1585 /* dst = *(u64 *)(ul) (src + off) */
1586 case BPF_LDX | BPF_MEM | BPF_DW:
1587 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1588 /*
1589 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
1590 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
1591 * load only if addr is kernel address (see is_kernel_addr()), otherwise
1592 * set dst_reg=0 and move on.
1593 */
1594 if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
1595 EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
1596 if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
1597 PPC_LI64(tmp2_reg, 0x8000000000000000ul);
1598 else /* BOOK3S_64 */
1599 PPC_LI64(tmp2_reg, PAGE_OFFSET);
1600 EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
1601 PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
1602 EMIT(PPC_RAW_LI(dst_reg, 0));
1603 /*
1604 * Check if 'off' is word aligned for BPF_DW, because
1605 * we might generate two instructions.
1606 */
1607 if ((BPF_SIZE(code) == BPF_DW && (off & 3)) ||
1608 (BPF_SIZE(code) == BPF_B &&
1609 BPF_MODE(code) == BPF_PROBE_MEMSX) ||
1610 (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_MEMSX))
1611 PPC_JMP((ctx->idx + 3) * 4);
1612 else
1613 PPC_JMP((ctx->idx + 2) * 4);
1614 }
1615
1616 if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
1617 switch (size) {
1618 case BPF_B:
1619 EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1620 EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
1621 break;
1622 case BPF_H:
1623 EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
1624 break;
1625 case BPF_W:
1626 EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
1627 break;
1628 }
1629 } else {
1630 switch (size) {
1631 case BPF_B:
1632 EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1633 break;
1634 case BPF_H:
1635 EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
1636 break;
1637 case BPF_W:
1638 EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
1639 break;
1640 case BPF_DW:
1641 if (off % 4) {
1642 EMIT(PPC_RAW_LI(tmp1_reg, off));
1643 EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
1644 } else {
1645 EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
1646 }
1647 break;
1648 }
1649 }
1650
1651 if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1652 addrs[++i] = ctx->idx * 4;
1653
1654 if (BPF_MODE(code) == BPF_PROBE_MEM) {
1655 ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1656 ctx->idx - 1, 4, dst_reg, code);
1657 if (ret)
1658 return ret;
1659 }
1660 break;
1661
1662 /* dst = *(u64 *)(ul) (src + ARENA_VM_START + off) */
1663 case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1664 case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1665 case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1666 case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1667
1668 EMIT(PPC_RAW_ADD(tmp1_reg, src_reg, bpf_to_ppc(ARENA_VM_START)));
1669
1670 switch (size) {
1671 case BPF_B:
1672 EMIT(PPC_RAW_LBZ(dst_reg, tmp1_reg, off));
1673 break;
1674 case BPF_H:
1675 EMIT(PPC_RAW_LHZ(dst_reg, tmp1_reg, off));
1676 break;
1677 case BPF_W:
1678 EMIT(PPC_RAW_LWZ(dst_reg, tmp1_reg, off));
1679 break;
1680 case BPF_DW:
1681 if (off % 4) {
1682 EMIT(PPC_RAW_LI(tmp2_reg, off));
1683 EMIT(PPC_RAW_LDX(dst_reg, tmp1_reg, tmp2_reg));
1684 } else {
1685 EMIT(PPC_RAW_LD(dst_reg, tmp1_reg, off));
1686 }
1687 break;
1688 }
1689
1690 if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1691 addrs[++i] = ctx->idx * 4;
1692
1693 ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1694 ctx->idx - 1, 4, dst_reg, code);
1695 if (ret)
1696 return ret;
1697 break;
1698
1699 /*
1700 * Doubleword load
1701 * 16 byte instruction that uses two 'struct bpf_insn'
1702 */
1703 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
1704 imm64 = ((u64)(u32) insn[i].imm) |
1705 (((u64)(u32) insn[i+1].imm) << 32);
1706 PPC_LI64(dst_reg, imm64);
1707 /* Adjust for two bpf instructions */
1708 addrs[++i] = ctx->idx * 4;
1709 break;
1710
1711 /*
1712 * JUMP reg
1713 */
1714 case BPF_JMP | BPF_JA | BPF_X:
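/*
 * Indirect jump: powerpc cannot branch to an address held in a GPR
 * directly, so move the target into CTR and branch through it.
 */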
1715 EMIT(PPC_RAW_MTCTR(dst_reg));
1716 EMIT(PPC_RAW_BCTR());
1717 break;
1718
1719 /*
1720 * Return/Exit
1721 */
1722 case BPF_JMP | BPF_EXIT:
1723 /*
1724 * If this isn't the very last instruction, branch to
1725 * the epilogue. If we _are_ the last instruction,
1726 * we'll just fall through to the epilogue.
1727 */
1728 if (i != flen - 1) {
1729 ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
1730 if (ret)
1731 return ret;
1732 }
1733 /* else fall through to the epilogue */
1734 break;
1735
1736 /*
1737 * Call kernel helper or bpf function
1738 */
1739 case BPF_JMP | BPF_CALL:
1740 ctx->seen |= SEEN_FUNC;
1741
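/*
 * A plain helper call has src_reg == BPF_REG_0. A couple of hot
 * helpers are inlined by reading the PACA (r13) directly: paca_index
 * is the CPU number and __current is the current task_struct pointer.
 */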
1742 if (src_reg == bpf_to_ppc(BPF_REG_0)) {
1743 if (imm == BPF_FUNC_get_smp_processor_id) {
1744 EMIT(PPC_RAW_LHZ(src_reg, _R13, offsetof(struct paca_struct, paca_index)));
1745 break;
1746 } else if (imm == BPF_FUNC_get_current_task ||
1747 imm == BPF_FUNC_get_current_task_btf) {
1748 EMIT(PPC_RAW_LD(src_reg, _R13, offsetof(struct paca_struct, __current)));
1749 break;
1750 }
1751 }
1752
1753 ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
1754 &func_addr, &func_addr_fixed);
1755 if (ret < 0)
1756 return ret;
1757
1758 /* Take care of powerpc ABI requirements before kfunc call */
1759 if (insn[i].src_reg == BPF_PSEUDO_KFUNC_CALL) {
1760 if (prepare_for_kfunc_call(fp, image, ctx, &insn[i]))
1761 return -1;
1762 }
1763
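/*
 * The call itself is emitted by bpf_jit_emit_func_call_rel(); per the
 * powerpc ABI the return value comes back in r3 and is copied into
 * BPF_REG_0 below.
 */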
1764 ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
1765 if (ret)
1766 return ret;
1767
1768 /* move return value from r3 to BPF_REG_0 */
1769 EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1770 break;
1771
1772 /*
1773 * Jumps and branches
1774 */
1775 case BPF_JMP | BPF_JA:
1776 PPC_JMP(addrs[i + 1 + off]);
1777 break;
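/* BPF_JMP32 | BPF_JA ("gotol") carries its 32-bit branch offset in imm */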
1778 case BPF_JMP32 | BPF_JA:
1779 PPC_JMP(addrs[i + 1 + imm]);
1780 break;
1781
1782 case BPF_JMP | BPF_JGT | BPF_K:
1783 case BPF_JMP | BPF_JGT | BPF_X:
1784 case BPF_JMP | BPF_JSGT | BPF_K:
1785 case BPF_JMP | BPF_JSGT | BPF_X:
1786 case BPF_JMP32 | BPF_JGT | BPF_K:
1787 case BPF_JMP32 | BPF_JGT | BPF_X:
1788 case BPF_JMP32 | BPF_JSGT | BPF_K:
1789 case BPF_JMP32 | BPF_JSGT | BPF_X:
1790 true_cond = COND_GT;
1791 goto cond_branch;
1792 case BPF_JMP | BPF_JLT | BPF_K:
1793 case BPF_JMP | BPF_JLT | BPF_X:
1794 case BPF_JMP | BPF_JSLT | BPF_K:
1795 case BPF_JMP | BPF_JSLT | BPF_X:
1796 case BPF_JMP32 | BPF_JLT | BPF_K:
1797 case BPF_JMP32 | BPF_JLT | BPF_X:
1798 case BPF_JMP32 | BPF_JSLT | BPF_K:
1799 case BPF_JMP32 | BPF_JSLT | BPF_X:
1800 true_cond = COND_LT;
1801 goto cond_branch;
1802 case BPF_JMP | BPF_JGE | BPF_K:
1803 case BPF_JMP | BPF_JGE | BPF_X:
1804 case BPF_JMP | BPF_JSGE | BPF_K:
1805 case BPF_JMP | BPF_JSGE | BPF_X:
1806 case BPF_JMP32 | BPF_JGE | BPF_K:
1807 case BPF_JMP32 | BPF_JGE | BPF_X:
1808 case BPF_JMP32 | BPF_JSGE | BPF_K:
1809 case BPF_JMP32 | BPF_JSGE | BPF_X:
1810 true_cond = COND_GE;
1811 goto cond_branch;
1812 case BPF_JMP | BPF_JLE | BPF_K:
1813 case BPF_JMP | BPF_JLE | BPF_X:
1814 case BPF_JMP | BPF_JSLE | BPF_K:
1815 case BPF_JMP | BPF_JSLE | BPF_X:
1816 case BPF_JMP32 | BPF_JLE | BPF_K:
1817 case BPF_JMP32 | BPF_JLE | BPF_X:
1818 case BPF_JMP32 | BPF_JSLE | BPF_K:
1819 case BPF_JMP32 | BPF_JSLE | BPF_X:
1820 true_cond = COND_LE;
1821 goto cond_branch;
1822 case BPF_JMP | BPF_JEQ | BPF_K:
1823 case BPF_JMP | BPF_JEQ | BPF_X:
1824 case BPF_JMP32 | BPF_JEQ | BPF_K:
1825 case BPF_JMP32 | BPF_JEQ | BPF_X:
1826 true_cond = COND_EQ;
1827 goto cond_branch;
1828 case BPF_JMP | BPF_JNE | BPF_K:
1829 case BPF_JMP | BPF_JNE | BPF_X:
1830 case BPF_JMP32 | BPF_JNE | BPF_K:
1831 case BPF_JMP32 | BPF_JNE | BPF_X:
1832 true_cond = COND_NE;
1833 goto cond_branch;
1834 case BPF_JMP | BPF_JSET | BPF_K:
1835 case BPF_JMP | BPF_JSET | BPF_X:
1836 case BPF_JMP32 | BPF_JSET | BPF_K:
1837 case BPF_JMP32 | BPF_JSET | BPF_X:
1838 true_cond = COND_NE;
1839 /* Fall through */
1840
1841 cond_branch:
1842 switch (code) {
1843 case BPF_JMP | BPF_JGT | BPF_X:
1844 case BPF_JMP | BPF_JLT | BPF_X:
1845 case BPF_JMP | BPF_JGE | BPF_X:
1846 case BPF_JMP | BPF_JLE | BPF_X:
1847 case BPF_JMP | BPF_JEQ | BPF_X:
1848 case BPF_JMP | BPF_JNE | BPF_X:
1849 case BPF_JMP32 | BPF_JGT | BPF_X:
1850 case BPF_JMP32 | BPF_JLT | BPF_X:
1851 case BPF_JMP32 | BPF_JGE | BPF_X:
1852 case BPF_JMP32 | BPF_JLE | BPF_X:
1853 case BPF_JMP32 | BPF_JEQ | BPF_X:
1854 case BPF_JMP32 | BPF_JNE | BPF_X:
1855 /* unsigned comparison */
1856 if (BPF_CLASS(code) == BPF_JMP32)
1857 EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
1858 else
1859 EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1860 break;
1861 case BPF_JMP | BPF_JSGT | BPF_X:
1862 case BPF_JMP | BPF_JSLT | BPF_X:
1863 case BPF_JMP | BPF_JSGE | BPF_X:
1864 case BPF_JMP | BPF_JSLE | BPF_X:
1865 case BPF_JMP32 | BPF_JSGT | BPF_X:
1866 case BPF_JMP32 | BPF_JSLT | BPF_X:
1867 case BPF_JMP32 | BPF_JSGE | BPF_X:
1868 case BPF_JMP32 | BPF_JSLE | BPF_X:
1869 /* signed comparison */
1870 if (BPF_CLASS(code) == BPF_JMP32)
1871 EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
1872 else
1873 EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1874 break;
1875 case BPF_JMP | BPF_JSET | BPF_X:
1876 case BPF_JMP32 | BPF_JSET | BPF_X:
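/*
 * For the 32-bit variant, mask the AND result to its low 32 bits with
 * rlwinm. so that CR0 reflects only those bits.
 */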
1877 if (BPF_CLASS(code) == BPF_JMP) {
1878 EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
1879 } else {
1880 EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
1881 EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
1882 }
1883 break;
1884 case BPF_JMP | BPF_JNE | BPF_K:
1885 case BPF_JMP | BPF_JEQ | BPF_K:
1886 case BPF_JMP | BPF_JGT | BPF_K:
1887 case BPF_JMP | BPF_JLT | BPF_K:
1888 case BPF_JMP | BPF_JGE | BPF_K:
1889 case BPF_JMP | BPF_JLE | BPF_K:
1890 case BPF_JMP32 | BPF_JNE | BPF_K:
1891 case BPF_JMP32 | BPF_JEQ | BPF_K:
1892 case BPF_JMP32 | BPF_JGT | BPF_K:
1893 case BPF_JMP32 | BPF_JLT | BPF_K:
1894 case BPF_JMP32 | BPF_JGE | BPF_K:
1895 case BPF_JMP32 | BPF_JLE | BPF_K:
1896 {
1897 bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1898
1899 /*
1900 * Only a small non-negative imm fits cmplwi/cmpldi directly;
1901 * other values are loaded (sign-extended) and compared below.
1902 */
1903 if (imm >= 0 && imm < 32768) {
1904 if (is_jmp32)
1905 EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
1906 else
1907 EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
1908 } else {
1909 /* sign-extending load */
1910 PPC_LI32(tmp1_reg, imm);
1911 /* ... but unsigned comparison */
1912 if (is_jmp32)
1913 EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
1914 else
1915 EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1916 }
1917 break;
1918 }
1919 case BPF_JMP | BPF_JSGT | BPF_K:
1920 case BPF_JMP | BPF_JSLT | BPF_K:
1921 case BPF_JMP | BPF_JSGE | BPF_K:
1922 case BPF_JMP | BPF_JSLE | BPF_K:
1923 case BPF_JMP32 | BPF_JSGT | BPF_K:
1924 case BPF_JMP32 | BPF_JSLT | BPF_K:
1925 case BPF_JMP32 | BPF_JSGE | BPF_K:
1926 case BPF_JMP32 | BPF_JSLE | BPF_K:
1927 {
1928 bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1929
1930 /*
1931 * signed comparison, so any 16-bit value
1932 * can be used in cmpdi
1933 */
1934 if (imm >= -32768 && imm < 32768) {
1935 if (is_jmp32)
1936 EMIT(PPC_RAW_CMPWI(dst_reg, imm));
1937 else
1938 EMIT(PPC_RAW_CMPDI(dst_reg, imm));
1939 } else {
1940 PPC_LI32(tmp1_reg, imm);
1941 if (is_jmp32)
1942 EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
1943 else
1944 EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1945 }
1946 break;
1947 }
1948 case BPF_JMP | BPF_JSET | BPF_K:
1949 case BPF_JMP32 | BPF_JSET | BPF_K:
1950 /* andi does not sign-extend the immediate */
1951 if (imm >= 0 && imm < 32768)
1952 /* PPC_ANDI is _only/always_ dot-form */
1953 EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1954 else {
1955 PPC_LI32(tmp1_reg, imm);
1956 if (BPF_CLASS(code) == BPF_JMP) {
1957 EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
1958 tmp1_reg));
1959 } else {
1960 EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
1961 EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
1962 0, 0, 31));
1963 }
1964 }
1965 break;
1966 }
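/*
 * The comparison above set CR0; branch to the BPF target when
 * 'true_cond' holds, otherwise fall through to the next instruction.
 */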
1967 PPC_BCC(true_cond, addrs[i + 1 + off]);
1968 break;
1969
1970 /*
1971 * Tail call
1972 */
1973 case BPF_JMP | BPF_TAIL_CALL:
1974 ctx->seen |= SEEN_TAILCALL;
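/*
 * bpf_jit_emit_tail_call() performs the index and tail-call limit
 * checks and branches into the target program; when the tail call
 * cannot be taken it falls through to addrs[i + 1].
 */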
1975 ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1976 if (ret < 0)
1977 return ret;
1978 break;
1979
1980 default:
1981 /*
1982 * The filter contains something cruel & unusual.
1983 * We don't handle it, but also there shouldn't be
1984 * anything missing from our list.
1985 */
1986 pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1987 code, i);
1988 return -ENOTSUPP;
1989 }
1990 }
1991
1992 /* Set end-of-body-code address for exit. */
1993 addrs[i] = ctx->idx * 4;
1994
1995 return 0;
1996 }
1997