// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler for PPC32
 *
 * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
 *		  CS GROUP France
 *
 * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 *
 *              [   prev sp          ] <-------------
 *              [  nv gpr save area  ] 16 * 4       |
 * fp (r18) --> [  ebpf stack space  ] up to 512    |
 *              [   frame header     ] 16           |
 * sp (r1) ---> [   stack pointer    ] --------------
 */

/* for gpr non volatile registers r17 to r31 (15) + tail call */
#define BPF_PPC_STACK_SAVE	(15 * 4 + 4)
/* stack frame, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME(ctx)	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
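
/*
 * Illustrative sizing (values assumed for the example): with
 * STACK_FRAME_MIN_SIZE = 16 on PPC32 and a program using the full
 * 512 bytes of eBPF stack, BPF_PPC_STACKFRAME() yields
 * 16 + (15 * 4 + 4) + 512 = 592 bytes, a multiple of 16 as required.
 */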

#define PPC_EX32(r, i)		EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))

/* PPC NVR range -- update this if we ever use NVRs below r17 */
#define BPF_PPC_NVR_MIN		_R17
#define BPF_PPC_TC		_R16

/* BPF register usage */
#define TMP_REG			(MAX_BPF_JIT_REG + 0)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R12;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R4;
	ctx->b2p[BPF_REG_2] = _R6;
	ctx->b2p[BPF_REG_3] = _R8;
	ctx->b2p[BPF_REG_4] = _R10;
	ctx->b2p[BPF_REG_5] = _R22;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R24;
	ctx->b2p[BPF_REG_7] = _R26;
	ctx->b2p[BPF_REG_8] = _R28;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R18;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R20;
	ctx->b2p[TMP_REG] = _R31;		/* 32 bits */
}
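
/*
 * Pairing convention (illustration): each 64-bit BPF register lives in a
 * pair of 32-bit GPRs, b2p[reg] holding the low word and b2p[reg] - 1 the
 * high word, which is what the "- 1" arithmetic throughout this file
 * relies on. For example, BPF_REG_1 maps to the r3:r4 pair (high:low);
 * the prologue moves the 32-bit ctx pointer from r3 into r4 and zeroes r3.
 */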

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
		return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);

	WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
	/* Use the hole we have left for alignment */
	return BPF_PPC_STACKFRAME(ctx) - 4;
}
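
/*
 * Worked example (illustrative, using the 592-byte frame assumed above):
 * registers are saved 4 bytes apart at the top of the frame, so r31 lands
 * at sp + 588, r17 at sp + 532, and the tail call counter slot (r16) at
 * sp + 528.
 */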

#define SEEN_VREG_MASK		0x1ff80000 /* Volatile registers r3-r12 */
#define SEEN_NVREG_FULL_MASK	0x0003ffff /* Non volatile registers r14-r31 */
#define SEEN_NVREG_TEMP_MASK	0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - we use non volatile registers, or
	 * - we use the tail call counter, or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
	unsigned int nvreg_mask;

	if (ctx->seen & SEEN_FUNC)
		nvreg_mask = SEEN_NVREG_TEMP_MASK;
	else
		nvreg_mask = SEEN_NVREG_FULL_MASK;

	while (ctx->seen & nvreg_mask &&
	       (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
		int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab));
		int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
		int i;

		for (i = BPF_REG_0; i <= TMP_REG; i++) {
			if (ctx->b2p[i] != old)
				continue;
			ctx->b2p[i] = new;
			bpf_set_seen_register(ctx, new);
			bpf_clear_seen_register(ctx, old);
			if (i != TMP_REG) {
				bpf_set_seen_register(ctx, new - 1);
				bpf_clear_seen_register(ctx, old - 1);
			}
			break;
		}
	}
}
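
/*
 * Sketch of the effect (hypothetical program): if the program never calls
 * helpers and only BPF_REG_6 (pair r23:r24) is in use while, say, the
 * volatile pair r7:r8 is free, the loop above rewrites b2p[BPF_REG_6] to
 * r8 and marks r7:r8 seen instead of r23:r24. The remapped program then
 * needs no non-volatile save/restore, and often no stack frame at all.
 */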

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Initialize tail_call_cnt, to be skipped if we do tail calls. */
	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_LI(_R4, 0));
	else
		EMIT(PPC_RAW_NOP());

#define BPF_TAILCALL_PROLOGUE_SIZE	4

	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));

	/* First arg comes in as a 32-bit pointer. */
	EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
	EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));

	/*
	 * We need a stack frame, but we don't necessarily need to
	 * save/restore LR unless we call other functions
	 */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MFLR(_R0));

	/*
	 * Back up non-volatile regs -- registers r17-r31
	 */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
	}

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}
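
/*
 * Illustrative prologue for a helper-calling program with tail calls
 * (592-byte frame and PPC_LR_STKOFF = 4 assumed for the example):
 *
 *	li	r4, 0		# tail_call_cnt = 0 (skipped on tail call entry)
 *	stwu	r1, -592(r1)	# allocate frame
 *	stw	r4, 528(r1)	# store tail_call_cnt
 *	mr	r4, r3		# BPF_REG_1 (low) = ctx
 *	li	r3, 0		# BPF_REG_1 (high) = 0
 *	mflr	r0
 *	stw	r17, 532(r1)	# save each seen NVR
 *	...
 *	stw	r0, 596(r1)	# save LR in the caller's frame
 */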

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BLR());
}

/* Relative offset needs to be calculated based on final image location */
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	s32 rel = (s32)func - (s32)(fimage + ctx->idx);

	if (image && rel < 0x2000000 && rel >= -0x2000000) {
		EMIT(PPC_RAW_BL(rel));
	} else {
		/* Load function address into r0 */
		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
		EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
		EMIT(PPC_RAW_MTCTR(_R0));
		EMIT(PPC_RAW_BCTRL());
	}

	return 0;
}
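
/*
 * The reach check above mirrors the 'bl' encoding: its LI field is a
 * 26-bit signed byte displacement, so direct calls work within +/- 32 MB
 * (0x2000000 bytes) of the call site. Illustrative fallback sequence for
 * a helper at 0xc0123456 (address made up for the example):
 *
 *	lis	r0, 0xc012
 *	ori	r0, r0, 0x3456
 *	mtctr	r0
 *	bctrl
 */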

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3-r6
	 * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r5-r6/BPF_REG_2 - pointer to bpf_array
	 * r7-r8/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
	EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
	/* tail_call_cnt++; */
	EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
	PPC_BCC_SHORT(COND_GE, out);

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
	EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R3, 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
	EMIT(PPC_RAW_MTCTR(_R3));

	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));

	/* Put tail_call_cnt in r4 */
	EMIT(PPC_RAW_MR(_R4, _R0));

	/* tear down stack frame & restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
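
/*
 * Note on the target address (illustration): the callee is entered at
 * bpf_func + BPF_TAILCALL_PROLOGUE_SIZE, i.e. 4 bytes in, which skips
 * only the 'li r4, 0' that would reset tail_call_cnt. The current count
 * travels in r4 instead, so a chain of programs shares one counter
 * bounded by MAX_TAIL_CALL_CNT.
 */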

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 prevcode = i ? insn[i - 1].code : 0;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 dst_reg_h = dst_reg - 1;
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 src_reg_h = src_reg - 1;
		u32 src2_reg = dst_reg;
		u32 src2_reg_h = dst_reg_h;
		u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
		u32 tmp_reg = bpf_to_ppc(TMP_REG);
		u32 size = BPF_SIZE(code);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) &&
		    (BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) &&
		    BPF_OP(prevcode) == BPF_MOV && BPF_SRC(prevcode) == BPF_X &&
		    insn[i - 1].dst_reg == insn[i].dst_reg && insn[i - 1].imm != 1) {
			src2_reg = bpf_to_ppc(insn[i - 1].src_reg);
			src2_reg_h = src2_reg - 1;
			ctx->idx = addrs[i - 1] / 4;
		}

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= 3 && dst_reg < 32) {
			bpf_set_seen_register(ctx, dst_reg);
			bpf_set_seen_register(ctx, dst_reg_h);
		}

		if (src_reg >= 3 && src_reg < 32) {
			bpf_set_seen_register(ctx, src_reg);
			bpf_set_seen_register(ctx, src_reg_h);
		}

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
			EMIT(PPC_RAW_ADD(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_ADDE(dst_reg_h, src2_reg_h, src_reg_h));
			break;
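		/*
		 * Worked example for the 64-bit add above (values assumed):
		 * 0x00000001_ffffffff + 1 -- addc computes the low words
		 * 0xffffffff + 1 = 0 and sets CA; adde then adds the high
		 * words plus CA: 1 + 0 + 1 = 2, giving 0x00000002_00000000.
		 */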
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
			EMIT(PPC_RAW_SUB(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, src2_reg));
			EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, src2_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (IMM_HA(imm) & 0xffff) {
				EMIT(PPC_RAW_ADDIS(dst_reg, src2_reg, IMM_HA(imm)));
				src2_reg = dst_reg;
			}
			if (IMM_L(imm))
				EMIT(PPC_RAW_ADDI(dst_reg, src2_reg, IMM_L(imm)));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
				break;
			}
			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDIC(dst_reg, src2_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, _R0));
			}
			if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
				EMIT(PPC_RAW_ADDZE(dst_reg_h, src2_reg_h));
			else
				EMIT(PPC_RAW_ADDME(dst_reg_h, src2_reg_h));
			break;
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_MULW(_R0, src2_reg, src_reg_h));
			EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_MULHWU(tmp_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
			break;
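		/*
		 * The 64x64 multiply above uses the schoolbook split
		 * (illustration): with dst = dh:dl and src = sh:sl,
		 *   (dh:dl) * (sh:sl) mod 2^64
		 *     = lo(dl*sl) + 2^32 * (dl*sh + dh*sl + mulhwu(dl, sl))
		 * The dh*sh term only affects bits >= 2^64 and is dropped.
		 */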
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
			EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
			if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
			} else if (is_power_of_2((u32)imm)) {
				EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, ilog2(imm)));
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_MULI(dst_reg, src2_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_MULW(dst_reg, src2_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (!imm) {
				PPC_LI32(dst_reg, 0);
				PPC_LI32(dst_reg_h, 0);
			} else if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
			} else if (imm > 0 && is_power_of_2(imm)) {
				imm = ilog2(imm);
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
			} else {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, tmp_reg));
				if (imm < 0)
					EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, src2_reg));
				EMIT(PPC_RAW_MULHWU(_R0, src2_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(dst_reg, src2_reg, tmp_reg));
				EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
			if (off)
				EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, src_reg));
			else
				EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (off)
				EMIT(PPC_RAW_DIVW(_R0, src2_reg, src_reg));
			else
				EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
			EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
			EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
			return -EOPNOTSUPP;
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			return -EOPNOTSUPP;
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
			if (!imm)
				return -EINVAL;
			if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (is_power_of_2((u32)imm)) {
				if (off)
					EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, ilog2(imm)));
				else
					EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
			} else {
				PPC_LI32(_R0, imm);
				if (off)
					EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, _R0));
				else
					EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
			if (!imm)
				return -EINVAL;

			if (!is_power_of_2((u32)imm)) {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				if (off)
					EMIT(PPC_RAW_DIVW(_R0, src2_reg, tmp_reg));
				else
					EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
				EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
			} else if (imm == 1) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
			} else if (off) {
				EMIT(PPC_RAW_SRAWI(_R0, src2_reg, ilog2(imm)));
				EMIT(PPC_RAW_ADDZE(_R0, _R0));
				EMIT(PPC_RAW_SLWI(_R0, _R0, ilog2(imm)));
				EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
			} else {
				imm = ilog2((u32)imm);
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31));
			}
			break;
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
			if (!imm)
				return -EINVAL;
			if (imm < 0)
				imm = -imm;
			if (!is_power_of_2(imm))
				return -EOPNOTSUPP;
			if (imm == 1) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else if (off) {
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
				EMIT(PPC_RAW_XOR(dst_reg, src2_reg, dst_reg_h));
				EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, dst_reg_h));
				EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
				EMIT(PPC_RAW_SUBFE(dst_reg_h, dst_reg_h, dst_reg_h));
			} else {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			}
			break;
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (!imm)
				return -EINVAL;
			if (!is_power_of_2(abs(imm)))
				return -EOPNOTSUPP;

			if (imm < 0) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
				imm = -imm;
				src2_reg = dst_reg;
				src2_reg_h = dst_reg_h;
			}
			if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else {
				imm = ilog2(imm);
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
			}
			break;
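		/*
		 * Worked example for the power-of-two path above (values
		 * assumed): dst /= 4 becomes a 2-bit funnel shift -- rlwinm
		 * rotates the low word right by 2, rlwimi inserts the two
		 * low bits of the high word as the top bits of the result,
		 * and srawi shifts the high word right arithmetically so
		 * the sign is preserved.
		 */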
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
			EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
			break;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_AND(dst_reg_h, src2_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (imm >= 0)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			fallthrough;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
			if (!IMM_H(imm)) {
				EMIT(PPC_RAW_ANDI(dst_reg, src2_reg, IMM_L(imm)));
			} else if (!IMM_L(imm)) {
				EMIT(PPC_RAW_ANDIS(dst_reg, src2_reg, IMM_H(imm)));
			} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0,
						    32 - fls(imm), 32 - ffs(imm)));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_AND(dst_reg, src2_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, src2_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
			EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			/* Sign-extended */
			if (imm < 0)
				EMIT(PPC_RAW_LI(dst_reg_h, -1));
			fallthrough;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
			if (IMM_L(imm)) {
				EMIT(PPC_RAW_ORI(dst_reg, src2_reg, IMM_L(imm)));
				src2_reg = dst_reg;
			}
			if (IMM_H(imm))
				EMIT(PPC_RAW_ORIS(dst_reg, src2_reg, IMM_H(imm)));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			if (dst_reg == src_reg) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
				EMIT(PPC_RAW_XOR(dst_reg_h, src2_reg_h, src_reg_h));
			}
			break;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
			if (dst_reg == src_reg)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0)
				EMIT(PPC_RAW_NOR(dst_reg_h, src2_reg_h, src2_reg_h));
			fallthrough;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
			if (IMM_L(imm)) {
				EMIT(PPC_RAW_XORI(dst_reg, src2_reg, IMM_L(imm)));
				src2_reg = dst_reg;
			}
			if (IMM_H(imm))
				EMIT(PPC_RAW_XORIS(dst_reg, src2_reg, IMM_H(imm)));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SLW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SRW(_R0, src2_reg, _R0));
			EMIT(PPC_RAW_SLW(tmp_reg, src2_reg, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
			break;
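		/*
		 * How the variable 64-bit shift works (illustration): slw/srw
		 * yield 0 for shift amounts of 32-63, so the three candidate
		 * contributions to the high word -- src_h << n, src_l >> (32 - n)
		 * and src_l << (n - 32) -- never overlap: for n < 32 the third
		 * term is 0, for n >= 32 the first two are. ORing them is
		 * therefore safe for any n in 0-63.
		 */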
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			if (imm)
				EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
			else
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, src2_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, imm, 0, 31 - imm));
			} else if (imm < 64) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg, imm, 0, 31 - imm));
				EMIT(PPC_RAW_LI(dst_reg, 0));
			} else {
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
				EMIT(PPC_RAW_LI(dst_reg, 0));
			}
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
			EMIT(PPC_RAW_SRW(tmp_reg, src2_reg_h, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_SRW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			if (imm)
				EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, imm));
			else
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, 32 - imm, imm, 31));
			} else if (imm < 64) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg_h, 64 - imm, imm - 32, 31));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			}
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
			EMIT(PPC_RAW_SRAW(tmp_reg, src2_reg_h, tmp_reg));
			EMIT(PPC_RAW_SRAW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			if (imm)
				EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, imm));
			else
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
			} else if (imm < 64) {
				EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, imm - 32));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
			} else {
				EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, 31));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
			}
			break;

		/*
		 * MOV
		 */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (off == 8) {
				EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
			} else if (off == 16) {
				EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
			} else if (off == 32 && dst_reg == src_reg) {
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
			} else if (off == 32) {
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
			} else if (dst_reg != src_reg) {
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
			}
			break;
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
			/* special mov32 for zext */
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			else if (off == 8)
				EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
			else if (off == 16)
				EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
			else if (dst_reg != src_reg)
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			PPC_EX32(dst_reg_h, imm);
			break;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
			PPC_LI32(dst_reg, imm);
			break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
			switch (imm) {
			case 16:
				/* Copy 16 bits to upper part */
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg, 16, 0, 15));
				/* Rotate 8 bits right & mask */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(_R0, src2_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				break;
			case 64:
				bpf_set_seen_register(ctx, tmp_reg);
				EMIT(PPC_RAW_RLWINM(tmp_reg, src2_reg, 8, 0, 31));
				EMIT(PPC_RAW_RLWINM(_R0, src2_reg_h, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 0, 7));
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 16, 23));
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
				break;
			}
			if (BPF_CLASS(code) == BPF_ALU64 && imm != 64)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
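		/*
		 * Byte-swap example for the 32-bit case above (value assumed):
		 * for 0x12345678, the rotate-left-8 gives 0x34567812, with
		 * bytes 0x56 and 0x12 already in place; the two rotate-left-24
		 * rlwimi steps then drop 0x78 into the top byte and 0x34 into
		 * byte 2, yielding 0x78563412.
		 */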
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 32 bits */
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 16, 31));
				break;
			case 32:
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STB(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STH(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
			break;
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
			PPC_EX32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			save_reg = _R0;
			ret_reg = src_reg;

			bpf_set_seen_register(ctx, tmp_reg);
			bpf_set_seen_register(ctx, ax_reg);

			/* Get offset into TMP_REG */
			EMIT(PPC_RAW_LI(tmp_reg, off));
			/*
			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
			 * before and after the operation.
			 *
			 * This is a requirement in the Linux Kernel Memory Model.
			 * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
			 */
			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
				EMIT(PPC_RAW_SYNC());
			tmp_idx = ctx->idx * 4;
			/* load value from memory into r0 */
			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));

			/* Save old value in BPF_REG_AX */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(ax_reg, _R0));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_REG_0 */
				EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				fallthrough;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
						   code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			/* For the BPF_FETCH variant, get old data into src_reg */
			if (imm & BPF_FETCH) {
				/* Emit 'sync' to enforce full ordering */
				if (IS_ENABLED(CONFIG_SMP))
					EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_MR(ret_reg, ax_reg));
				if (!fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
			}
			break;
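		/*
		 * Shape of the emitted atomic sequence (illustrative, for
		 * BPF_ADD | BPF_FETCH on SMP):
		 *
		 *	sync
		 * retry:
		 *	lwarx	r0, tmp, dst	# load-reserve old value
		 *	mr	ax, r0		# keep old value for the fetch
		 *	add	r0, r0, src
		 *	stwcx.	r0, tmp, dst	# store-conditional
		 *	bne-	retry		# reservation lost, try again
		 *	sync
		 *	mr	src, ax		# return old value
		 */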

		case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
			return -EOPNOTSUPP;

		/*
		 * BPF_LDX
		 */
		case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEMSX | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEMSX | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEMSX | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
				PPC_LI32(_R0, TASK_SIZE - off);
				EMIT(PPC_RAW_CMPLW(src_reg, _R0));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * For BPF_DW case, "li reg_h,0" would be needed when
				 * !fp->aux->verifier_zext. Emit NOP otherwise.
				 *
				 * Note that "li reg_h,0" is emitted for BPF_B/H/W case,
				 * if necessary. So, jump there instead of emitting an
				 * additional "li reg_h,0" instruction.
				 */
				if (size == BPF_DW && !fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(dst_reg_h, 0));
				else
					EMIT(PPC_RAW_NOP());
				/*
				 * Need to jump two instructions instead of one for BPF_DW case
				 * as there are two load instructions for dst_reg_h & dst_reg
				 * respectively.
				 */
				if (size == BPF_DW ||
				    (size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
				switch (size) {
				case BPF_B:
					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
					EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
					break;
				case BPF_H:
					EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
					break;
				case BPF_W:
					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
					break;
				}
				if (!fp->aux->verifier_zext)
					EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
			} else {
				switch (size) {
				case BPF_B:
					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
					break;
				case BPF_H:
					EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
					break;
				case BPF_W:
					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
					break;
				case BPF_DW:
					EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
					break;
				}
				if (size != BPF_DW && !fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(dst_reg_h, 0));
			}

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				int insn_idx = ctx->idx - 1;
				int jmp_off = 4;

				/*
				 * In case of BPF_DW, two lwz instructions are emitted, one
				 * for higher 32-bit and another for lower 32-bit. So, set
				 * ex->insn to the first of the two and jump over both
				 * instructions in fixup.
				 *
				 * Similarly, with !verifier_zext, two instructions are
				 * emitted for BPF_B/H/W case. So, set ex->insn to the
				 * instruction that could fault and skip over both
				 * instructions.
				 */
				if (size == BPF_DW || !fp->aux->verifier_zext) {
					insn_idx -= 1;
					jmp_off += 4;
				}

				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx,
							    jmp_off, dst_reg);
				if (ret)
					return ret;
			}
			break;
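		/*
		 * Guard sketch for PROBE_MEM (illustration): user addresses
		 * on PPC32 sit below TASK_SIZE, so 'cmplw src, TASK_SIZE - off'
		 * followed by the bgt only reaches the potentially-faulting
		 * load when src + off is a kernel address; for anything else
		 * dst is zeroed and the load is jumped over.
		 */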

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			tmp_idx = ctx->idx;
			PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
			PPC_LI32(dst_reg, (u32)insn[i].imm);
			/* padding to allow full 4 instructions for later patching */
			if (!image)
				for (j = ctx->idx - tmp_idx; j < 4; j++)
					EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;
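		/*
		 * Example encoding (value and register assumed): loading
		 * 0x12345678_9abcdef0 into BPF_REG_1 carries the low word in
		 * insn[i].imm and the high word in insn[i + 1].imm, expanding
		 * to up to four instructions such as:
		 *
		 *	lis	r3, 0x1234
		 *	ori	r3, r3, 0x5678
		 *	lis	r4, 0x9abc
		 *	ori	r4, r4, 0xdef0
		 */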

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8));
				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
			}

			ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
			if (ret)
				return ret;

			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3));
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;
		case BPF_JMP32 | BPF_JA:
			PPC_JMP(addrs[i + 1 + imm]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* fallthrough; */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmplwi
				 */
				if (imm >= 0 && imm < 32768) {
					EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load ... but unsigned comparison */
					PPC_EX32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpwi
				 */
				if (imm >= -32768 && imm < 32768) {
					EMIT(PPC_RAW_CMPWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					if (imm < 0) {
						EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
						PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					}
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
			return -EOPNOTSUPP;
		}
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
		    !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}