1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * bpf_jit_comp64.c: eBPF JIT compiler
4 *
5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6 * IBM Corporation
7 *
8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
9 */
10 #include <linux/moduleloader.h>
11 #include <asm/cacheflush.h>
12 #include <asm/asm-compat.h>
13 #include <linux/netdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_vlan.h>
16 #include <asm/kprobes.h>
17 #include <linux/bpf.h>
18 #include <asm/security_features.h>
19
20 #include "bpf_jit.h"
21
22 /*
23 * Stack layout:
24 * Ensure the top half (up to local_tmp_var) stays consistent
25 * with our redzone usage.
26 *
27 * [ prev sp ] <-------------
28 * [ nv gpr save area ] 5*8 |
29 * [ tail_call_cnt ] 8 |
30 * [ local_tmp_var ] 16 |
31 * fp (r31) --> [ ebpf stack space ] up to 512 |
32 * [ frame header ] 32/112 |
33 * sp (r1) ---> [ stack pointer ] --------------
34 */
35
36 /* for gpr non-volatile registers BPF_REG_6 to 10 */
37 #define BPF_PPC_STACK_SAVE (5*8)
38 /* for bpf JIT code internal usage */
39 #define BPF_PPC_STACK_LOCALS 24
40 /* stack frame excluding BPF stack, ensure this is quadword aligned */
41 #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
42 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
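/*
 * Worked example (editorial, not in the original source), assuming the
 * ELFv2 ABI where STACK_FRAME_MIN_SIZE is 32 bytes:
 *   BPF_PPC_STACKFRAME = 32 + 24 + 40 = 96 bytes, which satisfies the
 *   quadword (16-byte) alignment requirement above. With the maximum
 *   512-byte eBPF stack, the prologue's stdu moves r1 down by
 *   96 + 512 = 608 bytes.
 */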
43
44 /* BPF register usage */
45 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
46 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
47
48 /* BPF to ppc register mappings */
49 void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
50 {
51 /* function return value */
52 ctx->b2p[BPF_REG_0] = _R8;
53 /* function arguments */
54 ctx->b2p[BPF_REG_1] = _R3;
55 ctx->b2p[BPF_REG_2] = _R4;
56 ctx->b2p[BPF_REG_3] = _R5;
57 ctx->b2p[BPF_REG_4] = _R6;
58 ctx->b2p[BPF_REG_5] = _R7;
59 /* non-volatile registers */
60 ctx->b2p[BPF_REG_6] = _R27;
61 ctx->b2p[BPF_REG_7] = _R28;
62 ctx->b2p[BPF_REG_8] = _R29;
63 ctx->b2p[BPF_REG_9] = _R30;
64 /* frame pointer aka BPF_REG_10 */
65 ctx->b2p[BPF_REG_FP] = _R31;
66 /* eBPF jit internal registers */
67 ctx->b2p[BPF_REG_AX] = _R12;
68 ctx->b2p[TMP_REG_1] = _R9;
69 ctx->b2p[TMP_REG_2] = _R10;
70 }
71
72 /* PPC NVR range -- update this if we ever use NVRs below r27 */
73 #define BPF_PPC_NVR_MIN _R27
74
75 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
76 {
77 /*
78 * We only need a stack frame if:
79 * - we call other functions (kernel helpers), or
80 * - the bpf program uses its stack area
81 * The latter condition is deduced from the usage of BPF_REG_FP
82 */
83 return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
84 }
85
86 /*
87 * When not setting up our own stackframe, the redzone usage is:
88 *
89 * [ prev sp ] <-------------
90 * [ ... ] |
91 * sp (r1) ---> [ stack pointer ] --------------
92 * [ nv gpr save area ] 5*8
93 * [ tail_call_cnt ] 8
94 * [ local_tmp_var ] 16
95 * [ unused red zone ] 208 bytes protected
96 */
97 static int bpf_jit_stack_local(struct codegen_context *ctx)
98 {
99 if (bpf_has_stack_frame(ctx))
100 return STACK_FRAME_MIN_SIZE + ctx->stack_size;
101 else
102 return -(BPF_PPC_STACK_SAVE + 24);
103 }
104
105 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
106 {
107 return bpf_jit_stack_local(ctx) + 16;
108 }
109
110 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
111 {
112 if (reg >= BPF_PPC_NVR_MIN && reg < 32)
113 return (bpf_has_stack_frame(ctx) ?
114 (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
115 - (8 * (32 - reg));
116
117 pr_err("BPF JIT is asking about unknown registers");
118 BUG();
119 }
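/*
 * Worked offsets for the no-stack-frame case (editorial, not in the
 * original source), matching the redzone picture above:
 *   bpf_jit_stack_offsetof()    -> -40/-32/-24/-16/-8 for r27..r31
 *   bpf_jit_stack_tailcallcnt() -> -64 + 16 = -48
 *   bpf_jit_stack_local()       -> -(40 + 24) = -64
 * The -48 slot is the same -(BPF_PPC_STACK_SAVE + 8) address that the
 * prologue uses when it zeroes tail_call_cnt.
 */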
120
121 void bpf_jit_realloc_regs(struct codegen_context *ctx)
122 {
123 }
124
125 void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
126 {
127 int i;
128
129 #ifndef CONFIG_PPC_KERNEL_PCREL
130 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
131 EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
132 #endif
133
134 /*
135 * Initialize tail_call_cnt if we do tail calls.
136 * Otherwise, put in NOPs so that it can be skipped when we are
137 * invoked through a tail call.
138 */
139 if (ctx->seen & SEEN_TAILCALL) {
140 EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
141 /* this goes in the redzone */
142 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
143 } else {
144 EMIT(PPC_RAW_NOP());
145 EMIT(PPC_RAW_NOP());
146 }
147
148 if (bpf_has_stack_frame(ctx)) {
149 /*
150 * We need a stack frame, but we don't necessarily need to
151 * save/restore LR unless we call other functions
152 */
153 if (ctx->seen & SEEN_FUNC) {
154 EMIT(PPC_RAW_MFLR(_R0));
155 EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
156 }
157
158 EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
159 }
160
161 /*
162 * Back up non-volatile regs -- BPF registers 6-10
163 * If we haven't created our own stack frame, we save these
164 * in the protected zone below the previous stack frame
165 */
166 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
167 if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
168 EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
169
170 /* Set up the frame pointer to point to the bpf stack area */
171 if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
172 EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
173 STACK_FRAME_MIN_SIZE + ctx->stack_size));
174 }
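/*
 * Editorial sketch (not in the original source) of the code emitted by the
 * prologue above for a program that calls helpers and uses the full
 * 512-byte stack, assuming ELFv2 without PCREL:
 *
 *   ld    r2, kernel_toc(r13)      ; TOC for kernel helpers
 *   li    r9, 0                    ; tail_call_cnt = 0 (or two nops)
 *   std   r9, -48(r1)
 *   mflr  r0
 *   std   r0, 16(r1)               ; PPC_LR_STKOFF
 *   stdu  r1, -608(r1)             ; 96-byte frame + 512-byte BPF stack
 *   std   r27, 568(r1)             ; only the NVRs actually used ...
 *   ...
 *   std   r31, 600(r1)
 *   addi  r31, r1, 544             ; BPF frame pointer
 */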
175
176 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
177 {
178 int i;
179
180 /* Restore NVRs */
181 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
182 if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
183 EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
184
185 /* Tear down our stack frame */
186 if (bpf_has_stack_frame(ctx)) {
187 EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
188 if (ctx->seen & SEEN_FUNC) {
189 EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
190 EMIT(PPC_RAW_MTLR(_R0));
191 }
192 }
193 }
194
195 void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
196 {
197 bpf_jit_emit_common_epilogue(image, ctx);
198
199 /* Move result to r3 */
200 EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
201
202 EMIT(PPC_RAW_BLR());
203 }
204
205 static int
206 bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
207 {
208 unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
209 long reladdr;
210
211 if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
212 return -EINVAL;
213
214 #ifdef CONFIG_PPC_KERNEL_PCREL
215 reladdr = func_addr - local_paca->kernelbase;
216
217 if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
218 EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
219 /* Align for subsequent prefix instruction */
220 if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
221 EMIT(PPC_RAW_NOP());
222 /* paddi r12,r12,addr */
223 EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
224 EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
225 } else {
226 unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
227 bool alignment_needed = !IS_ALIGNED(pc, 8);
228
229 reladdr = func_addr - (alignment_needed ? pc + 4 : pc);
230
231 if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
232 if (alignment_needed)
233 EMIT(PPC_RAW_NOP());
234 /* pla r12,addr */
235 EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
236 EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
237 } else {
238 /* We can clobber r12 */
239 PPC_LI64(_R12, func);
240 }
241 }
242 EMIT(PPC_RAW_MTCTR(_R12));
243 EMIT(PPC_RAW_BCTRL());
244 #else
245 if (core_kernel_text(func_addr)) {
246 reladdr = func_addr - kernel_toc_addr();
247 if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
248 pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
249 return -ERANGE;
250 }
251
252 EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
253 EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
254 EMIT(PPC_RAW_MTCTR(_R12));
255 EMIT(PPC_RAW_BCTRL());
256 } else {
257 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
258 /* func points to the function descriptor */
259 PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
260 /* Load actual entry point from function descriptor */
261 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
262 /* ... and move it to CTR */
263 EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
264 /*
265 * Load TOC from function descriptor at offset 8.
266 * We can clobber r2 since we get called through a
267 * function pointer (so caller will save/restore r2).
268 */
269 EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
270 } else {
271 PPC_LI64(_R12, func);
272 EMIT(PPC_RAW_MTCTR(_R12));
273 }
274 EMIT(PPC_RAW_BCTRL());
275 /*
276 * Load r2 with kernel TOC as kernel TOC is used if function address falls
277 * within core kernel text.
278 */
279 EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
280 }
281 #endif
282
283 return 0;
284 }
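/*
 * Editorial sketch of the non-PCREL sequence emitted above for a helper
 * located in core kernel text, with reladdr = func_addr - kernel TOC:
 *
 *   addis r12, r2, reladdr@ha
 *   addi  r12, r12, reladdr@l
 *   mtctr r12
 *   bctrl
 */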
285
286 int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
287 {
288 unsigned int i, ctx_idx = ctx->idx;
289
290 if (WARN_ON_ONCE(func && is_module_text_address(func)))
291 return -EINVAL;
292
293 /* skip past descriptor if elf v1 */
294 func += FUNCTION_DESCR_SIZE;
295
296 /* Load function address into r12 */
297 PPC_LI64(_R12, func);
298
299 /* For bpf-to-bpf function calls, the callee's address is unknown
300 * until the last extra pass. As seen above, we use PPC_LI64() to
301 * load the callee's address, but this may optimize the number of
302 * instructions required based on the nature of the address.
303 *
304 * Since we don't want the number of instructions emitted to increase,
305 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
306 * we always have a five-instruction sequence, which is the maximum
307 * that PPC_LI64() can emit.
308 */
309 if (!image)
310 for (i = ctx->idx - ctx_idx; i < 5; i++)
311 EMIT(PPC_RAW_NOP());
312
313 EMIT(PPC_RAW_MTCTR(_R12));
314 EMIT(PPC_RAW_BCTRL());
315
316 return 0;
317 }
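/*
 * Editorial note: the five-instruction slot padded with nops above is sized
 * for the worst-case PPC_LI64() expansion of an arbitrary 64-bit address,
 * e.g.:
 *
 *   lis   r12, addr@highest
 *   ori   r12, r12, addr@higher
 *   sldi  r12, r12, 32
 *   oris  r12, r12, addr@h
 *   ori   r12, r12, addr@l
 */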
318
319 static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
320 {
321 /*
322 * By now, the eBPF program has already set up parameters in r3, r4 and r5
323 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
324 * r4/BPF_REG_2 - pointer to bpf_array
325 * r5/BPF_REG_3 - index in bpf_array
326 */
327 int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
328 int b2p_index = bpf_to_ppc(BPF_REG_3);
329 int bpf_tailcall_prologue_size = 8;
330
331 if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
332 bpf_tailcall_prologue_size += 4; /* skip past the toc load */
333
334 /*
335 * if (index >= array->map.max_entries)
336 * goto out;
337 */
338 EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
339 EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
340 EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
341 PPC_BCC_SHORT(COND_GE, out);
342
343 /*
344 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
345 * goto out;
346 */
347 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
348 EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
349 PPC_BCC_SHORT(COND_GE, out);
350
351 /*
352 * tail_call_cnt++;
353 */
354 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
355 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
356
357 /* prog = array->ptrs[index]; */
358 EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
359 EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
360 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
361
362 /*
363 * if (prog == NULL)
364 * goto out;
365 */
366 EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
367 PPC_BCC_SHORT(COND_EQ, out);
368
369 /* goto *(prog->bpf_func + prologue_size); */
370 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
371 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
372 FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
373 EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
374
375 /* tear down stack, restore NVRs, ... */
376 bpf_jit_emit_common_epilogue(image, ctx);
377
378 EMIT(PPC_RAW_BCTR());
379
380 /* out: */
381 return 0;
382 }
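/*
 * Editorial note: with the ELFv2 non-PCREL prologue, the computed entry
 * point above is prog->bpf_func + 0 (no function descriptor) + 12, i.e.
 * just past the TOC load and the two tail_call_cnt instructions, so the
 * callee keeps the caller's tail_call_cnt instead of resetting it to zero.
 */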
383
384 /*
385 * We spill into the redzone always, even if the bpf program has its own stackframe.
386 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
387 */
388 void bpf_stf_barrier(void);
389
390 asm (
391 " .global bpf_stf_barrier ;"
392 " bpf_stf_barrier: ;"
393 " std 21,-64(1) ;"
394 " std 22,-56(1) ;"
395 " sync ;"
396 " ld 21,-64(1) ;"
397 " ld 22,-56(1) ;"
398 " ori 31,31,0 ;"
399 " .rept 14 ;"
400 " b 1f ;"
401 " 1: ;"
402 " .endr ;"
403 " blr ;"
404 );
405
406 /* Assemble the body code between the prologue & epilogue */
407 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
408 u32 *addrs, int pass, bool extra_pass)
409 {
410 enum stf_barrier_type stf_barrier = stf_barrier_type_get();
411 const struct bpf_insn *insn = fp->insnsi;
412 int flen = fp->len;
413 int i, ret;
414
415 /* Start of epilogue code - will only be valid from the 2nd pass onwards */
416 u32 exit_addr = addrs[flen];
417
418 for (i = 0; i < flen; i++) {
419 u32 code = insn[i].code;
420 u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
421 u32 src_reg = bpf_to_ppc(insn[i].src_reg);
422 u32 size = BPF_SIZE(code);
423 u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
424 u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
425 u32 save_reg, ret_reg;
426 s16 off = insn[i].off;
427 s32 imm = insn[i].imm;
428 bool func_addr_fixed;
429 u64 func_addr;
430 u64 imm64;
431 u32 true_cond;
432 u32 tmp_idx;
433 int j;
434
435 /*
436 * addrs[] maps a BPF bytecode address into a real offset from
437 * the start of the body code.
438 */
439 addrs[i] = ctx->idx * 4;
440
441 /*
442 * As an optimization, we note down which non-volatile registers
443 * are used so that we can only save/restore those in our
444 * prologue and epilogue. We do this here regardless of whether
445 * the actual BPF instruction uses src/dst registers or not
446 * (for instance, BPF_CALL does not use them). The expectation
447 * is that those instructions will have src_reg/dst_reg set to
448 * 0. Even otherwise, we just lose some prologue/epilogue
449 * optimization but everything else should work without
450 * any issues.
451 */
452 if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
453 bpf_set_seen_register(ctx, dst_reg);
454 if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
455 bpf_set_seen_register(ctx, src_reg);
456
457 switch (code) {
458 /*
459 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
460 */
461 case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
462 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
463 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
464 goto bpf_alu32_trunc;
465 case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
466 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
467 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
468 goto bpf_alu32_trunc;
469 case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
470 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
471 if (!imm) {
472 goto bpf_alu32_trunc;
473 } else if (imm >= -32768 && imm < 32768) {
474 EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
475 } else {
476 PPC_LI32(tmp1_reg, imm);
477 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
478 }
479 goto bpf_alu32_trunc;
480 case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
481 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
482 if (!imm) {
483 goto bpf_alu32_trunc;
484 } else if (imm > -32768 && imm <= 32768) {
485 EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
486 } else {
487 PPC_LI32(tmp1_reg, imm);
488 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
489 }
490 goto bpf_alu32_trunc;
491 case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
492 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
493 if (BPF_CLASS(code) == BPF_ALU)
494 EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
495 else
496 EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
497 goto bpf_alu32_trunc;
498 case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
499 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
500 if (imm >= -32768 && imm < 32768)
501 EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
502 else {
503 PPC_LI32(tmp1_reg, imm);
504 if (BPF_CLASS(code) == BPF_ALU)
505 EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
506 else
507 EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
508 }
509 goto bpf_alu32_trunc;
510 case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
511 case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
512 if (BPF_OP(code) == BPF_MOD) {
513 if (off)
514 EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg));
515 else
516 EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
517
518 EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
519 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
520 } else
521 if (off)
522 EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg));
523 else
524 EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
525 goto bpf_alu32_trunc;
526 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
527 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
528 if (BPF_OP(code) == BPF_MOD) {
529 if (off)
530 EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg));
531 else
532 EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
533 EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
534 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
535 } else
536 if (off)
537 EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg));
538 else
539 EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
540 break;
541 case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
542 case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
543 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
544 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
545 if (imm == 0)
546 return -EINVAL;
547 if (imm == 1) {
548 if (BPF_OP(code) == BPF_DIV) {
549 goto bpf_alu32_trunc;
550 } else {
551 EMIT(PPC_RAW_LI(dst_reg, 0));
552 break;
553 }
554 }
555
556 PPC_LI32(tmp1_reg, imm);
557 switch (BPF_CLASS(code)) {
558 case BPF_ALU:
559 if (BPF_OP(code) == BPF_MOD) {
560 if (off)
561 EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg));
562 else
563 EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
564 EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
565 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
566 } else
567 if (off)
568 EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg));
569 else
570 EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
571 break;
572 case BPF_ALU64:
573 if (BPF_OP(code) == BPF_MOD) {
574 if (off)
575 EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg));
576 else
577 EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
578 EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
579 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
580 } else
581 if (off)
582 EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg));
583 else
584 EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
585 break;
586 }
587 goto bpf_alu32_trunc;
588 case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
589 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
590 EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
591 goto bpf_alu32_trunc;
592
593 /*
594 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
595 */
596 case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
597 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
598 EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
599 goto bpf_alu32_trunc;
600 case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
601 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
602 if (!IMM_H(imm))
603 EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
604 else {
605 /* Sign-extended */
606 PPC_LI32(tmp1_reg, imm);
607 EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
608 }
609 goto bpf_alu32_trunc;
610 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
611 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
612 EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
613 goto bpf_alu32_trunc;
614 case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
615 case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
616 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
617 /* Sign-extended */
618 PPC_LI32(tmp1_reg, imm);
619 EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
620 } else {
621 if (IMM_L(imm))
622 EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
623 if (IMM_H(imm))
624 EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
625 }
626 goto bpf_alu32_trunc;
627 case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
628 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
629 EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
630 goto bpf_alu32_trunc;
631 case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
632 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
633 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
634 /* Sign-extended */
635 PPC_LI32(tmp1_reg, imm);
636 EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
637 } else {
638 if (IMM_L(imm))
639 EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
640 if (IMM_H(imm))
641 EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
642 }
643 goto bpf_alu32_trunc;
644 case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
645 /* slw clears top 32 bits */
646 EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
647 /* skip zero extension move, but set address map. */
648 if (insn_is_zext(&insn[i + 1]))
649 addrs[++i] = ctx->idx * 4;
650 break;
651 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
652 EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
653 break;
654 case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
655 /* with imm 0, we still need to clear top 32 bits */
656 EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
657 if (insn_is_zext(&insn[i + 1]))
658 addrs[++i] = ctx->idx * 4;
659 break;
660 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
661 if (imm != 0)
662 EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
663 break;
664 case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
665 EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
666 if (insn_is_zext(&insn[i + 1]))
667 addrs[++i] = ctx->idx * 4;
668 break;
669 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
670 EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
671 break;
672 case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
673 EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
674 if (insn_is_zext(&insn[i + 1]))
675 addrs[++i] = ctx->idx * 4;
676 break;
677 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
678 if (imm != 0)
679 EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
680 break;
681 case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
682 EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
683 goto bpf_alu32_trunc;
684 case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
685 EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
686 break;
687 case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
688 EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
689 goto bpf_alu32_trunc;
690 case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
691 if (imm != 0)
692 EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
693 break;
694
695 /*
696 * MOV
697 */
698 case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
699 case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
700 if (imm == 1) {
701 /* special mov32 for zext */
702 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
703 break;
704 } else if (off == 8) {
705 EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
706 } else if (off == 16) {
707 EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
708 } else if (off == 32) {
709 EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
710 } else if (dst_reg != src_reg)
711 EMIT(PPC_RAW_MR(dst_reg, src_reg));
712 goto bpf_alu32_trunc;
713 case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
714 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
715 PPC_LI32(dst_reg, imm);
716 if (imm < 0)
717 goto bpf_alu32_trunc;
718 else if (insn_is_zext(&insn[i + 1]))
719 addrs[++i] = ctx->idx * 4;
720 break;
721
722 bpf_alu32_trunc:
723 /* Truncate to 32-bits */
724 if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
725 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
726 break;
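/*
 * Editorial example of why the truncation above is needed: for
 * BPF_ALU | BPF_ADD | BPF_K with dst = 0xffffffff and imm = 1, the 64-bit
 * addi leaves 0x100000000 in the GPR; the rlwinm clears the upper 32 bits
 * so the 32-bit result is 0, unless the verifier has already guaranteed
 * zero-extension (fp->aux->verifier_zext).
 */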
727
728 /*
729 * BPF_FROM_BE/LE
730 */
731 case BPF_ALU | BPF_END | BPF_FROM_LE:
732 case BPF_ALU | BPF_END | BPF_FROM_BE:
733 case BPF_ALU64 | BPF_END | BPF_FROM_LE:
734 #ifdef __BIG_ENDIAN__
735 if (BPF_SRC(code) == BPF_FROM_BE)
736 goto emit_clear;
737 #else /* !__BIG_ENDIAN__ */
738 if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
739 goto emit_clear;
740 #endif
741 switch (imm) {
742 case 16:
743 /* Rotate 8 bits left & mask with 0x0000ff00 */
744 EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
745 /* Rotate 8 bits right & insert LSB to reg */
746 EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
747 /* Move result back to dst_reg */
748 EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
749 break;
750 case 32:
751 /*
752 * Rotate word left by 8 bits:
753 * 2 bytes are already in their final position
754 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
755 */
756 EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
757 /* Rotate 24 bits and insert byte 1 */
758 EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
759 /* Rotate 24 bits and insert byte 3 */
760 EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
761 EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
762 break;
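/*
 * Editorial worked example for the three instructions above, with
 * dst_reg = 0x11223344:
 *   rlwinm -> 0x22334411
 *   rlwimi -> 0x44334411 (insert original MSB)
 *   rlwimi -> 0x44332211 (insert original byte 3)
 */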
763 case 64:
764 /* Store the value to stack and then use byte-reverse loads */
765 EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
766 EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
767 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
768 EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
769 } else {
770 EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
771 if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
772 EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
773 EMIT(PPC_RAW_LI(tmp2_reg, 4));
774 EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
775 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
776 EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
777 EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
778 }
779 break;
780 }
781 break;
782
783 emit_clear:
784 switch (imm) {
785 case 16:
786 /* zero-extend 16 bits into 64 bits */
787 EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
788 if (insn_is_zext(&insn[i + 1]))
789 addrs[++i] = ctx->idx * 4;
790 break;
791 case 32:
792 if (!fp->aux->verifier_zext)
793 /* zero-extend 32 bits into 64 bits */
794 EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
795 break;
796 case 64:
797 /* nop */
798 break;
799 }
800 break;
801
802 /*
803 * BPF_ST NOSPEC (speculation barrier)
804 */
805 case BPF_ST | BPF_NOSPEC:
806 if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
807 !security_ftr_enabled(SEC_FTR_STF_BARRIER))
808 break;
809
810 switch (stf_barrier) {
811 case STF_BARRIER_EIEIO:
812 EMIT(PPC_RAW_EIEIO() | 0x02000000);
813 break;
814 case STF_BARRIER_SYNC_ORI:
815 EMIT(PPC_RAW_SYNC());
816 EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
817 EMIT(PPC_RAW_ORI(_R31, _R31, 0));
818 break;
819 case STF_BARRIER_FALLBACK:
820 ctx->seen |= SEEN_FUNC;
821 PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
822 EMIT(PPC_RAW_MTCTR(_R12));
823 EMIT(PPC_RAW_BCTRL());
824 break;
825 case STF_BARRIER_NONE:
826 break;
827 }
828 break;
829
830 /*
831 * BPF_ST(X)
832 */
833 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
834 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
835 if (BPF_CLASS(code) == BPF_ST) {
836 EMIT(PPC_RAW_LI(tmp1_reg, imm));
837 src_reg = tmp1_reg;
838 }
839 EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
840 break;
841 case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
842 case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
843 if (BPF_CLASS(code) == BPF_ST) {
844 EMIT(PPC_RAW_LI(tmp1_reg, imm));
845 src_reg = tmp1_reg;
846 }
847 EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
848 break;
849 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
850 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
851 if (BPF_CLASS(code) == BPF_ST) {
852 PPC_LI32(tmp1_reg, imm);
853 src_reg = tmp1_reg;
854 }
855 EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
856 break;
857 case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
858 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
859 if (BPF_CLASS(code) == BPF_ST) {
860 PPC_LI32(tmp1_reg, imm);
861 src_reg = tmp1_reg;
862 }
863 if (off % 4) {
864 EMIT(PPC_RAW_LI(tmp2_reg, off));
865 EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
866 } else {
867 EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
868 }
869 break;
870
871 /*
872 * BPF_STX ATOMIC (atomic ops)
873 */
874 case BPF_STX | BPF_ATOMIC | BPF_W:
875 case BPF_STX | BPF_ATOMIC | BPF_DW:
876 save_reg = tmp2_reg;
877 ret_reg = src_reg;
878
879 /* Get offset into TMP_REG_1 */
880 EMIT(PPC_RAW_LI(tmp1_reg, off));
881 /*
882 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
883 * before and after the operation.
884 *
885 * This is a requirement in the Linux Kernel Memory Model.
886 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
887 */
888 if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
889 EMIT(PPC_RAW_SYNC());
890 tmp_idx = ctx->idx * 4;
891 /* load value from memory into TMP_REG_2 */
892 if (size == BPF_DW)
893 EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
894 else
895 EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
896
897 /* Save old value in _R0 */
898 if (imm & BPF_FETCH)
899 EMIT(PPC_RAW_MR(_R0, tmp2_reg));
900
901 switch (imm) {
902 case BPF_ADD:
903 case BPF_ADD | BPF_FETCH:
904 EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
905 break;
906 case BPF_AND:
907 case BPF_AND | BPF_FETCH:
908 EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
909 break;
910 case BPF_OR:
911 case BPF_OR | BPF_FETCH:
912 EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
913 break;
914 case BPF_XOR:
915 case BPF_XOR | BPF_FETCH:
916 EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
917 break;
918 case BPF_CMPXCHG:
919 /*
920 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
921 * in src_reg for other cases.
922 */
923 ret_reg = bpf_to_ppc(BPF_REG_0);
924
925 /* Compare with old value in BPF_R0 */
926 if (size == BPF_DW)
927 EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
928 else
929 EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
930 /* Don't set if different from old value */
931 PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
932 fallthrough;
933 case BPF_XCHG:
934 save_reg = src_reg;
935 break;
936 default:
937 pr_err_ratelimited(
938 "eBPF filter atomic op code %02x (@%d) unsupported\n",
939 code, i);
940 return -EOPNOTSUPP;
941 }
942
943 /* store new value */
944 if (size == BPF_DW)
945 EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
946 else
947 EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
948 /* we're done if this succeeded */
949 PPC_BCC_SHORT(COND_NE, tmp_idx);
950
951 if (imm & BPF_FETCH) {
952 /* Emit 'sync' to enforce full ordering */
953 if (IS_ENABLED(CONFIG_SMP))
954 EMIT(PPC_RAW_SYNC());
955 EMIT(PPC_RAW_MR(ret_reg, _R0));
956 /*
957 * Skip unnecessary zero-extension for 32-bit cmpxchg.
958 * For context, see commit 39491867ace5.
959 */
960 if (size != BPF_DW && imm == BPF_CMPXCHG &&
961 insn_is_zext(&insn[i + 1]))
962 addrs[++i] = ctx->idx * 4;
963 }
964 break;
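/*
 * Editorial sketch of the loop generated above for a fetching 64-bit
 * atomic add (BPF_ADD | BPF_FETCH) on SMP:
 *
 *   li     r9, off
 *   sync
 * 0: ldarx  r10, r9, dst
 *   mr     r0, r10
 *   add    r10, r10, src
 *   stdcx. r10, r9, dst
 *   bne-   0b
 *   sync
 *   mr     src, r0          ; old value returned in src_reg
 */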
965
966 /*
967 * BPF_LDX
968 */
969 /* dst = *(u8 *)(ul) (src + off) */
970 case BPF_LDX | BPF_MEM | BPF_B:
971 case BPF_LDX | BPF_MEMSX | BPF_B:
972 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
973 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
974 /* dst = *(u16 *)(ul) (src + off) */
975 case BPF_LDX | BPF_MEM | BPF_H:
976 case BPF_LDX | BPF_MEMSX | BPF_H:
977 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
978 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
979 /* dst = *(u32 *)(ul) (src + off) */
980 case BPF_LDX | BPF_MEM | BPF_W:
981 case BPF_LDX | BPF_MEMSX | BPF_W:
982 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
983 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
984 /* dst = *(u64 *)(ul) (src + off) */
985 case BPF_LDX | BPF_MEM | BPF_DW:
986 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
987 /*
988 * A PTR_TO_BTF_ID pointer accessed via BPF_PROBE_MEM mode can be either a
989 * valid kernel pointer or NULL, but never a userspace address. So execute the
990 * load only if the address is a kernel address (see is_kernel_addr()),
991 * otherwise set dst_reg = 0 and move on.
992 */
993 if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
994 EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
995 if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
996 PPC_LI64(tmp2_reg, 0x8000000000000000ul);
997 else /* BOOK3S_64 */
998 PPC_LI64(tmp2_reg, PAGE_OFFSET);
999 EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
1000 PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
1001 EMIT(PPC_RAW_LI(dst_reg, 0));
1002 /*
1003 * Check if 'off' is word aligned for BPF_DW, because
1004 * we might generate two instructions.
1005 */
1006 if ((BPF_SIZE(code) == BPF_DW ||
1007 (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) &&
1008 (off & 3))
1009 PPC_JMP((ctx->idx + 3) * 4);
1010 else
1011 PPC_JMP((ctx->idx + 2) * 4);
1012 }
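/*
 * Editorial sketch of the guard emitted above on BOOK3S_64, where any
 * address at or below PAGE_OFFSET is treated as non-kernel:
 *
 *   addi  r9, src, off
 *   <load PAGE_OFFSET into r10>
 *   cmpld r9, r10
 *   bgt   1f              ; kernel address: perform the load
 *   li    dst, 0
 *   b     2f              ; skip the load
 * 1: <load from off(src)>
 * 2:
 */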
1013
1014 if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
1015 switch (size) {
1016 case BPF_B:
1017 EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1018 EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
1019 break;
1020 case BPF_H:
1021 EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
1022 break;
1023 case BPF_W:
1024 EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
1025 break;
1026 }
1027 } else {
1028 switch (size) {
1029 case BPF_B:
1030 EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1031 break;
1032 case BPF_H:
1033 EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
1034 break;
1035 case BPF_W:
1036 EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
1037 break;
1038 case BPF_DW:
1039 if (off % 4) {
1040 EMIT(PPC_RAW_LI(tmp1_reg, off));
1041 EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
1042 } else {
1043 EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
1044 }
1045 break;
1046 }
1047 }
1048
1049 if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1050 addrs[++i] = ctx->idx * 4;
1051
1052 if (BPF_MODE(code) == BPF_PROBE_MEM) {
1053 ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1054 ctx->idx - 1, 4, dst_reg);
1055 if (ret)
1056 return ret;
1057 }
1058 break;
1059
1060 /*
1061 * Doubleword load
1062 * 16-byte instruction that uses two 'struct bpf_insn'
1063 */
1064 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
1065 imm64 = ((u64)(u32) insn[i].imm) |
1066 (((u64)(u32) insn[i+1].imm) << 32);
1067 tmp_idx = ctx->idx;
1068 PPC_LI64(dst_reg, imm64);
1069 /* padding to allow full 5 instructions for later patching */
1070 if (!image)
1071 for (j = ctx->idx - tmp_idx; j < 5; j++)
1072 EMIT(PPC_RAW_NOP());
1073 /* Adjust for two bpf instructions */
1074 addrs[++i] = ctx->idx * 4;
1075 break;
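/*
 * Editorial example: for the 16-byte instruction pair
 * { .imm = 0x89abcdef } followed by { .imm = 0x01234567 }, the code above
 * materializes imm64 = 0x0123456789abcdef in dst_reg and skips the second
 * half of the pair in addrs[].
 */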
1076
1077 /*
1078 * Return/Exit
1079 */
1080 case BPF_JMP | BPF_EXIT:
1081 /*
1082 * If this isn't the very last instruction, branch to
1083 * the epilogue. If we _are_ the last instruction,
1084 * we'll just fall through to the epilogue.
1085 */
1086 if (i != flen - 1) {
1087 ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
1088 if (ret)
1089 return ret;
1090 }
1091 /* else fall through to the epilogue */
1092 break;
1093
1094 /*
1095 * Call kernel helper or bpf function
1096 */
1097 case BPF_JMP | BPF_CALL:
1098 ctx->seen |= SEEN_FUNC;
1099
1100 ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
1101 &func_addr, &func_addr_fixed);
1102 if (ret < 0)
1103 return ret;
1104
1105 if (func_addr_fixed)
1106 ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
1107 else
1108 ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
1109
1110 if (ret)
1111 return ret;
1112
1113 /* move return value from r3 to BPF_REG_0 */
1114 EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1115 break;
1116
1117 /*
1118 * Jumps and branches
1119 */
1120 case BPF_JMP | BPF_JA:
1121 PPC_JMP(addrs[i + 1 + off]);
1122 break;
1123 case BPF_JMP32 | BPF_JA:
1124 PPC_JMP(addrs[i + 1 + imm]);
1125 break;
1126
1127 case BPF_JMP | BPF_JGT | BPF_K:
1128 case BPF_JMP | BPF_JGT | BPF_X:
1129 case BPF_JMP | BPF_JSGT | BPF_K:
1130 case BPF_JMP | BPF_JSGT | BPF_X:
1131 case BPF_JMP32 | BPF_JGT | BPF_K:
1132 case BPF_JMP32 | BPF_JGT | BPF_X:
1133 case BPF_JMP32 | BPF_JSGT | BPF_K:
1134 case BPF_JMP32 | BPF_JSGT | BPF_X:
1135 true_cond = COND_GT;
1136 goto cond_branch;
1137 case BPF_JMP | BPF_JLT | BPF_K:
1138 case BPF_JMP | BPF_JLT | BPF_X:
1139 case BPF_JMP | BPF_JSLT | BPF_K:
1140 case BPF_JMP | BPF_JSLT | BPF_X:
1141 case BPF_JMP32 | BPF_JLT | BPF_K:
1142 case BPF_JMP32 | BPF_JLT | BPF_X:
1143 case BPF_JMP32 | BPF_JSLT | BPF_K:
1144 case BPF_JMP32 | BPF_JSLT | BPF_X:
1145 true_cond = COND_LT;
1146 goto cond_branch;
1147 case BPF_JMP | BPF_JGE | BPF_K:
1148 case BPF_JMP | BPF_JGE | BPF_X:
1149 case BPF_JMP | BPF_JSGE | BPF_K:
1150 case BPF_JMP | BPF_JSGE | BPF_X:
1151 case BPF_JMP32 | BPF_JGE | BPF_K:
1152 case BPF_JMP32 | BPF_JGE | BPF_X:
1153 case BPF_JMP32 | BPF_JSGE | BPF_K:
1154 case BPF_JMP32 | BPF_JSGE | BPF_X:
1155 true_cond = COND_GE;
1156 goto cond_branch;
1157 case BPF_JMP | BPF_JLE | BPF_K:
1158 case BPF_JMP | BPF_JLE | BPF_X:
1159 case BPF_JMP | BPF_JSLE | BPF_K:
1160 case BPF_JMP | BPF_JSLE | BPF_X:
1161 case BPF_JMP32 | BPF_JLE | BPF_K:
1162 case BPF_JMP32 | BPF_JLE | BPF_X:
1163 case BPF_JMP32 | BPF_JSLE | BPF_K:
1164 case BPF_JMP32 | BPF_JSLE | BPF_X:
1165 true_cond = COND_LE;
1166 goto cond_branch;
1167 case BPF_JMP | BPF_JEQ | BPF_K:
1168 case BPF_JMP | BPF_JEQ | BPF_X:
1169 case BPF_JMP32 | BPF_JEQ | BPF_K:
1170 case BPF_JMP32 | BPF_JEQ | BPF_X:
1171 true_cond = COND_EQ;
1172 goto cond_branch;
1173 case BPF_JMP | BPF_JNE | BPF_K:
1174 case BPF_JMP | BPF_JNE | BPF_X:
1175 case BPF_JMP32 | BPF_JNE | BPF_K:
1176 case BPF_JMP32 | BPF_JNE | BPF_X:
1177 true_cond = COND_NE;
1178 goto cond_branch;
1179 case BPF_JMP | BPF_JSET | BPF_K:
1180 case BPF_JMP | BPF_JSET | BPF_X:
1181 case BPF_JMP32 | BPF_JSET | BPF_K:
1182 case BPF_JMP32 | BPF_JSET | BPF_X:
1183 true_cond = COND_NE;
1184 /* Fall through */
1185
1186 cond_branch:
1187 switch (code) {
1188 case BPF_JMP | BPF_JGT | BPF_X:
1189 case BPF_JMP | BPF_JLT | BPF_X:
1190 case BPF_JMP | BPF_JGE | BPF_X:
1191 case BPF_JMP | BPF_JLE | BPF_X:
1192 case BPF_JMP | BPF_JEQ | BPF_X:
1193 case BPF_JMP | BPF_JNE | BPF_X:
1194 case BPF_JMP32 | BPF_JGT | BPF_X:
1195 case BPF_JMP32 | BPF_JLT | BPF_X:
1196 case BPF_JMP32 | BPF_JGE | BPF_X:
1197 case BPF_JMP32 | BPF_JLE | BPF_X:
1198 case BPF_JMP32 | BPF_JEQ | BPF_X:
1199 case BPF_JMP32 | BPF_JNE | BPF_X:
1200 /* unsigned comparison */
1201 if (BPF_CLASS(code) == BPF_JMP32)
1202 EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
1203 else
1204 EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1205 break;
1206 case BPF_JMP | BPF_JSGT | BPF_X:
1207 case BPF_JMP | BPF_JSLT | BPF_X:
1208 case BPF_JMP | BPF_JSGE | BPF_X:
1209 case BPF_JMP | BPF_JSLE | BPF_X:
1210 case BPF_JMP32 | BPF_JSGT | BPF_X:
1211 case BPF_JMP32 | BPF_JSLT | BPF_X:
1212 case BPF_JMP32 | BPF_JSGE | BPF_X:
1213 case BPF_JMP32 | BPF_JSLE | BPF_X:
1214 /* signed comparison */
1215 if (BPF_CLASS(code) == BPF_JMP32)
1216 EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
1217 else
1218 EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1219 break;
1220 case BPF_JMP | BPF_JSET | BPF_X:
1221 case BPF_JMP32 | BPF_JSET | BPF_X:
1222 if (BPF_CLASS(code) == BPF_JMP) {
1223 EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
1224 } else {
1225 EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
1226 EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
1227 }
1228 break;
1229 case BPF_JMP | BPF_JNE | BPF_K:
1230 case BPF_JMP | BPF_JEQ | BPF_K:
1231 case BPF_JMP | BPF_JGT | BPF_K:
1232 case BPF_JMP | BPF_JLT | BPF_K:
1233 case BPF_JMP | BPF_JGE | BPF_K:
1234 case BPF_JMP | BPF_JLE | BPF_K:
1235 case BPF_JMP32 | BPF_JNE | BPF_K:
1236 case BPF_JMP32 | BPF_JEQ | BPF_K:
1237 case BPF_JMP32 | BPF_JGT | BPF_K:
1238 case BPF_JMP32 | BPF_JLT | BPF_K:
1239 case BPF_JMP32 | BPF_JGE | BPF_K:
1240 case BPF_JMP32 | BPF_JLE | BPF_K:
1241 {
1242 bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1243
1244 /*
1245 * Need sign-extended load, so only positive
1246 * values can be used as imm in cmpldi
1247 */
1248 if (imm >= 0 && imm < 32768) {
1249 if (is_jmp32)
1250 EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
1251 else
1252 EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
1253 } else {
1254 /* sign-extending load */
1255 PPC_LI32(tmp1_reg, imm);
1256 /* ... but unsigned comparison */
1257 if (is_jmp32)
1258 EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
1259 else
1260 EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1261 }
1262 break;
1263 }
1264 case BPF_JMP | BPF_JSGT | BPF_K:
1265 case BPF_JMP | BPF_JSLT | BPF_K:
1266 case BPF_JMP | BPF_JSGE | BPF_K:
1267 case BPF_JMP | BPF_JSLE | BPF_K:
1268 case BPF_JMP32 | BPF_JSGT | BPF_K:
1269 case BPF_JMP32 | BPF_JSLT | BPF_K:
1270 case BPF_JMP32 | BPF_JSGE | BPF_K:
1271 case BPF_JMP32 | BPF_JSLE | BPF_K:
1272 {
1273 bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1274
1275 /*
1276 * signed comparison, so any 16-bit value
1277 * can be used in cmpdi
1278 */
1279 if (imm >= -32768 && imm < 32768) {
1280 if (is_jmp32)
1281 EMIT(PPC_RAW_CMPWI(dst_reg, imm));
1282 else
1283 EMIT(PPC_RAW_CMPDI(dst_reg, imm));
1284 } else {
1285 PPC_LI32(tmp1_reg, imm);
1286 if (is_jmp32)
1287 EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
1288 else
1289 EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1290 }
1291 break;
1292 }
1293 case BPF_JMP | BPF_JSET | BPF_K:
1294 case BPF_JMP32 | BPF_JSET | BPF_K:
1295 /* andi does not sign-extend the immediate */
1296 if (imm >= 0 && imm < 32768)
1297 /* PPC_ANDI is _only/always_ dot-form */
1298 EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1299 else {
1300 PPC_LI32(tmp1_reg, imm);
1301 if (BPF_CLASS(code) == BPF_JMP) {
1302 EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
1303 tmp1_reg));
1304 } else {
1305 EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
1306 EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
1307 0, 0, 31));
1308 }
1309 }
1310 break;
1311 }
1312 PPC_BCC(true_cond, addrs[i + 1 + off]);
1313 break;
1314
1315 /*
1316 * Tail call
1317 */
1318 case BPF_JMP | BPF_TAIL_CALL:
1319 ctx->seen |= SEEN_TAILCALL;
1320 ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1321 if (ret < 0)
1322 return ret;
1323 break;
1324
1325 default:
1326 /*
1327 * The filter contains something cruel & unusual.
1328 * We don't handle it, but also there shouldn't be
1329 * anything missing from our list.
1330 */
1331 pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1332 code, i);
1333 return -ENOTSUPP;
1334 }
1335 }
1336
1337 /* Set end-of-body-code address for exit. */
1338 addrs[i] = ctx->idx * 4;
1339
1340 return 0;
1341 }
1342