1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * eBPF JIT compiler for PPC32
4 *
5 * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
6 * CS GROUP France
7 *
8 * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
9 */
10 #include <linux/moduleloader.h>
11 #include <asm/cacheflush.h>
12 #include <asm/asm-compat.h>
13 #include <linux/netdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_vlan.h>
16 #include <asm/kprobes.h>
17 #include <linux/bpf.h>
18
19 #include "bpf_jit.h"
20
21 /*
22 * Stack layout:
23 *
24 * [ prev sp ] <-------------
25 * [ nv gpr save area ] 16 * 4 |
26 * fp (r31) --> [ ebpf stack space ] upto 512 |
27 * [ frame header ] 16 |
28 * sp (r1) ---> [ stack pointer ] --------------
29 */
30
31 /* for gpr non volatile registers r17 to r31 (15) + tail call */
32 #define BPF_PPC_STACK_SAVE (15 * 4 + 4)
33 /* stack frame, ensure this is quadword aligned */
34 #define BPF_PPC_STACKFRAME(ctx) (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
35
/* High word of a sign-extended 32-bit immediate: -1 if negative, else 0 */
36 #define PPC_EX32(r, i) EMIT(PPC_RAW_LI((r), (i) < 0 ?
-1 : 0))
37
38 /* PPC NVR range -- update this if we ever use NVRs below r17 */
39 #define BPF_PPC_NVR_MIN _R17
40 #define BPF_PPC_TC _R16
41
42 /* BPF register usage */
43 #define TMP_REG (MAX_BPF_JIT_REG + 0)
44
45 /* BPF to ppc register mappings */
/*
 * Each 64-bit BPF register lives in a pair of GPRs: the register listed
 * below holds the low 32 bits and the register just below it (reg - 1)
 * holds the high 32 bits.
 */
46 void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
47 {
48 /* function return value */
49 ctx->b2p[BPF_REG_0] = _R12;
50 /* function arguments */
51 ctx->b2p[BPF_REG_1] = _R4;
52 ctx->b2p[BPF_REG_2] = _R6;
53 ctx->b2p[BPF_REG_3] = _R8;
54 ctx->b2p[BPF_REG_4] = _R10;
55 ctx->b2p[BPF_REG_5] = _R22;
56 /* non volatile registers */
57 ctx->b2p[BPF_REG_6] = _R24;
58 ctx->b2p[BPF_REG_7] = _R26;
59 ctx->b2p[BPF_REG_8] = _R28;
60 ctx->b2p[BPF_REG_9] = _R30;
61 /* frame pointer aka BPF_REG_10 */
62 ctx->b2p[BPF_REG_FP] = _R18;
63 /* eBPF jit internal registers */
64 ctx->b2p[BPF_REG_AX] = _R20;
65 ctx->b2p[TMP_REG] = _R31; /* 32 bits */
66 }
67
/* Offset from r1 of the save slot for NVR 'reg' or the tail-call counter */
68 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
69 {
70 if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
71 return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);
72
73 WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
74 /* Use the hole we have left for alignment */
75 return BPF_PPC_STACKFRAME(ctx) - 4;
76 }
77
78 #define SEEN_VREG_MASK 0x1ff80000 /* Volatile registers r3-r12 */
79 #define SEEN_NVREG_FULL_MASK 0x0003ffff /* Non volatile registers r14-r31 */
80 #define SEEN_NVREG_TEMP_MASK 0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
81
82 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
83 {
84 /*
85 * We only need a stack frame if:
86 * - we call other functions (kernel helpers), or
87 * - we use non volatile registers, or
88 * - we use tail call counter
89 * - the bpf program uses its stack area
90 * The latter condition is deduced from the usage of BPF_REG_FP
91 */
92 return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
93 bpf_is_seen_register(ctx,
bpf_to_ppc(BPF_REG_FP));
94 }
95
/*
 * Remap BPF registers currently assigned to non-volatile GPRs into unused
 * volatile GPRs (r3-r12), so the prologue/epilogue need not save/restore
 * them. When the program calls helpers (SEEN_FUNC), only the temporary
 * registers (BPF_REG_5, BPF_REG_AX, TMP_REG) are candidates, since
 * volatile registers are clobbered across calls.
 */
96 void bpf_jit_realloc_regs(struct codegen_context *ctx)
97 {
98 unsigned int nvreg_mask;
99
100 if (ctx->seen & SEEN_FUNC)
101 nvreg_mask = SEEN_NVREG_TEMP_MASK;
102 else
103 nvreg_mask = SEEN_NVREG_FULL_MASK;
104
105 while (ctx->seen & nvreg_mask &&
106 (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
/* pick one seen NVR ('old') and one still-free volatile reg ('new') */
107 int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab));
108 int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
109 int i;
110
111 for (i = BPF_REG_0; i <= TMP_REG; i++) {
112 if (ctx->b2p[i] != old)
113 continue;
114 ctx->b2p[i] = new;
115 bpf_set_seen_register(ctx, new);
116 bpf_clear_seen_register(ctx, old);
117 if (i != TMP_REG) {
/* 64-bit BPF registers also occupy reg - 1 (high word) */
118 bpf_set_seen_register(ctx, new - 1);
119 bpf_clear_seen_register(ctx, old - 1);
120 }
121 break;
122 }
123 }
124 }
125
126 void prepare_for_fsession_fentry(u32 *image, struct codegen_context *ctx, int cookie_cnt,
127 int cookie_off, int retval_off)
128 {
129 /*
130 * Set session cookies value
131 * Clear cookies field on stack
132 * Ensure retval to be cleared on fentry
133 */
134 EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG), 0));
135
/*
 * NOTE(review): each iteration clears 8 bytes (two 4-byte stores) but the
 * loop advances by only 4 * i, so consecutive cookie slots overlap and the
 * tail of the area is left untouched for cookie_cnt > 1 -- confirm whether
 * the stride should be 8 * i.
 */
136 for (int i = 0; i < cookie_cnt; i++) {
137 EMIT(PPC_RAW_STW(bpf_to_ppc(TMP_REG), _R1, cookie_off + 4 * i));
138 EMIT(PPC_RAW_STW(bpf_to_ppc(TMP_REG), _R1, cookie_off + 4 * i + 4));
139 }
140
141 EMIT(PPC_RAW_STW(bpf_to_ppc(TMP_REG), _R1, retval_off));
142 EMIT(PPC_RAW_STW(bpf_to_ppc(TMP_REG), _R1, retval_off + 4));
143 }
144
145 void store_func_meta(u32 *image, struct codegen_context *ctx,
146 u64 func_meta, int func_meta_off)
147 {
148 /*
149 * Store func_meta to stack: [R1 + func_meta_off] = func_meta
150 * func_meta := argument count in first byte + cookie value
151 */
152 /* Store lower word */
153 PPC_LI32(bpf_to_ppc(TMP_REG), (u32)func_meta);
154 EMIT(PPC_RAW_STW(bpf_to_ppc(TMP_REG), _R1, func_meta_off));
155
156 /* Store upper word */
157 PPC_LI32(bpf_to_ppc(TMP_REG), (u32)(func_meta >> 32));
158 EMIT(PPC_RAW_STW(bpf_to_ppc(TMP_REG), _R1,
func_meta_off + 4));
159 }
160
/*
 * Emit the program prologue: trampoline-attach NOP, tail-call counter
 * setup, stack frame allocation, NVR saves and frame-pointer setup.
 */
161 void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
162 {
163 int i;
164
165 /* Instruction for trampoline attach */
166 EMIT(PPC_RAW_NOP());
167
168 /* Initialize tail_call_cnt, to be skipped if we do tail calls. */
169 if (ctx->seen & SEEN_TAILCALL)
170 EMIT(PPC_RAW_LI(_R4, 0))
171 else
172 EMIT(PPC_RAW_NOP());
173
174 #define BPF_TAILCALL_PROLOGUE_SIZE 8
175
176 if (bpf_has_stack_frame(ctx))
177 EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
178
179 if (ctx->seen & SEEN_TAILCALL)
180 EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
181
182 /* First arg comes in as a 32 bits pointer. */
183 EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
184 EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));
185
186 /*
187 * We need a stack frame, but we don't necessarily need to
188 * save/restore LR unless we call other functions
189 */
190 if (ctx->seen & SEEN_FUNC)
191 EMIT(PPC_RAW_MFLR(_R0));
192
193 /*
194 * Back up non-volatile regs -- registers r17-r31
195 */
196 for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
197 if (bpf_is_seen_register(ctx, i))
198 EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
199
200 /* Setup frame pointer to point to the bpf stack area */
201 if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
202 EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
203 EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
204 STACK_FRAME_MIN_SIZE + ctx->stack_size));
205 }
206
207 if (ctx->seen & SEEN_FUNC)
208 EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
209 }
210
/* Restore NVRs/LR and pop the frame (shared by epilogue and tail call) */
211 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
212 {
213 int i;
214
215 /* Restore NVRs */
216 for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
217 if (bpf_is_seen_register(ctx, i))
218 EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
219
220 if (ctx->seen & SEEN_FUNC)
221 EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
222
223 /* Tear down our
stack frame */
224 if (bpf_has_stack_frame(ctx))
225 EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
226
227 if (ctx->seen & SEEN_FUNC)
228 EMIT(PPC_RAW_MTLR(_R0));
229
230 }
231
/* Emit the epilogue: move result to r3, restore state, return */
232 void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
233 {
234 EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
235
236 bpf_jit_emit_common_epilogue(image, ctx);
237
238 EMIT(PPC_RAW_BLR());
239
240 bpf_jit_build_fentry_stubs(image, ctx);
241 }
242
243 /* Relative offset needs to be calculated based on final image location */
244 int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
245 {
246 s32 rel = (s32)func - (s32)(fimage + ctx->idx);
247
/* near branch when the target fits in +/- 32MB, otherwise go via CTR */
248 if (image && rel < 0x2000000 && rel >= -0x2000000) {
249 EMIT(PPC_RAW_BL(rel));
250 } else {
251 /* Load function address into r0 */
252 EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
253 EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
254 EMIT(PPC_RAW_MTCTR(_R0));
255 EMIT(PPC_RAW_BCTRL());
256 }
257
258 return 0;
259 }
260
261 static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
262 {
263 /*
264 * By now, the eBPF program has already setup parameters in r3-r6
265 * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
266 * r5-r6/BPF_REG_2 - pointer to bpf_array
267 * r7-r8/BPF_REG_3 - index in bpf_array
268 */
269 int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
270 int b2p_index = bpf_to_ppc(BPF_REG_3);
271
272 /*
273 * if (index >= array->map.max_entries)
274 * goto out;
275 */
276 EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
277 EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
278 EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
279 PPC_BCC_SHORT(COND_GE, out);
280
281 /*
282 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
283 * goto out;
284 */
285 EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
286 /* tail_call_cnt++; */
287 EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
288 PPC_BCC_SHORT(COND_GE, out);
289
290 /* prog =
array->ptrs[index]; */
291 EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
292 EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
293 EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
294
295 /*
296 * if (prog == NULL)
297 * goto out;
298 */
299 EMIT(PPC_RAW_CMPLWI(_R3, 0));
300 PPC_BCC_SHORT(COND_EQ, out);
301
302 /* goto *(prog->bpf_func + prologue_size); */
303 EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
304 EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
305 EMIT(PPC_RAW_MTCTR(_R3));
306
307 EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));
308
309 /* Put tail_call_cnt in r4 */
310 EMIT(PPC_RAW_MR(_R4, _R0));
311
312 /* tear down stack, restore NVRs, ... */
313 bpf_jit_emit_common_epilogue(image, ctx);
314
315 EMIT(PPC_RAW_BCTR());
316
317 /* out: */
318 return 0;
319 }
320
321 /* Assemble the body code between the prologue & epilogue */
322 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
323 u32 *addrs, int pass, bool extra_pass)
324 {
325 const struct bpf_insn *insn = fp->insnsi;
326 int flen = fp->len;
327 int i, ret;
328
329 /* Start of epilogue code - will only be valid 2nd pass onwards */
330 u32 exit_addr = addrs[flen];
331
332 for (i = 0; i < flen; i++) {
333 u32 code = insn[i].code;
334 u32 prevcode = i ?
insn[i - 1].code : 0; 335 u32 dst_reg = bpf_to_ppc(insn[i].dst_reg); 336 u32 dst_reg_h = dst_reg - 1; 337 u32 src_reg = bpf_to_ppc(insn[i].src_reg); 338 u32 src_reg_h = src_reg - 1; 339 u32 src2_reg = dst_reg; 340 u32 src2_reg_h = dst_reg_h; 341 u32 ax_reg = bpf_to_ppc(BPF_REG_AX); 342 u32 tmp_reg = bpf_to_ppc(TMP_REG); 343 u32 size = BPF_SIZE(code); 344 u32 save_reg, ret_reg; 345 s16 off = insn[i].off; 346 s32 imm = insn[i].imm; 347 bool func_addr_fixed; 348 u64 func_addr; 349 u32 true_cond; 350 u32 tmp_idx; 351 352 if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) && 353 (BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) && 354 BPF_OP(prevcode) == BPF_MOV && BPF_SRC(prevcode) == BPF_X && 355 insn[i - 1].dst_reg == insn[i].dst_reg && insn[i - 1].imm != 1) { 356 src2_reg = bpf_to_ppc(insn[i - 1].src_reg); 357 src2_reg_h = src2_reg - 1; 358 ctx->idx = addrs[i - 1] / 4; 359 } 360 361 /* 362 * addrs[] maps a BPF bytecode address into a real offset from 363 * the start of the body code. 364 */ 365 addrs[i] = ctx->idx * 4; 366 367 /* 368 * As an optimization, we note down which registers 369 * are used so that we can only save/restore those in our 370 * prologue and epilogue. We do this here regardless of whether 371 * the actual BPF instruction uses src/dst registers or not 372 * (for instance, BPF_CALL does not use them). The expectation 373 * is that those instructions will have src_reg/dst_reg set to 374 * 0. Even otherwise, we just lose some prologue/epilogue 375 * optimization but everything else should work without 376 * any issues. 
377 */ 378 if (dst_reg >= 3 && dst_reg < 32) { 379 bpf_set_seen_register(ctx, dst_reg); 380 bpf_set_seen_register(ctx, dst_reg_h); 381 } 382 383 if (src_reg >= 3 && src_reg < 32) { 384 bpf_set_seen_register(ctx, src_reg); 385 bpf_set_seen_register(ctx, src_reg_h); 386 } 387 388 switch (code) { 389 /* 390 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG 391 */ 392 case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */ 393 EMIT(PPC_RAW_ADD(dst_reg, src2_reg, src_reg)); 394 break; 395 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */ 396 EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, src_reg)); 397 EMIT(PPC_RAW_ADDE(dst_reg_h, src2_reg_h, src_reg_h)); 398 break; 399 case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */ 400 EMIT(PPC_RAW_SUB(dst_reg, src2_reg, src_reg)); 401 break; 402 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */ 403 EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, src2_reg)); 404 EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, src2_reg_h)); 405 break; 406 case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */ 407 imm = -imm; 408 fallthrough; 409 case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */ 410 if (!imm) { 411 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 412 } else if (IMM_HA(imm) & 0xffff) { 413 EMIT(PPC_RAW_ADDIS(dst_reg, src2_reg, IMM_HA(imm))); 414 src2_reg = dst_reg; 415 } 416 if (IMM_L(imm)) 417 EMIT(PPC_RAW_ADDI(dst_reg, src2_reg, IMM_L(imm))); 418 break; 419 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */ 420 imm = -imm; 421 fallthrough; 422 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */ 423 if (!imm) { 424 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 425 EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); 426 break; 427 } 428 if (imm >= -32768 && imm < 32768) { 429 EMIT(PPC_RAW_ADDIC(dst_reg, src2_reg, imm)); 430 } else { 431 PPC_LI32(_R0, imm); 432 EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, _R0)); 433 } 434 if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000)) 435 EMIT(PPC_RAW_ADDZE(dst_reg_h, src2_reg_h)); 436 else 437 
EMIT(PPC_RAW_ADDME(dst_reg_h, src2_reg_h)); 438 break; 439 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */ 440 bpf_set_seen_register(ctx, tmp_reg); 441 EMIT(PPC_RAW_MULW(_R0, src2_reg, src_reg_h)); 442 EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, src_reg)); 443 EMIT(PPC_RAW_MULHWU(tmp_reg, src2_reg, src_reg)); 444 EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg)); 445 EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0)); 446 EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg)); 447 break; 448 case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */ 449 EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg)); 450 break; 451 case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */ 452 if (imm == 1) { 453 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 454 } else if (imm == -1) { 455 EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0)); 456 } else if (is_power_of_2((u32)imm)) { 457 EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, ilog2(imm))); 458 } else if (imm >= -32768 && imm < 32768) { 459 EMIT(PPC_RAW_MULI(dst_reg, src2_reg, imm)); 460 } else { 461 PPC_LI32(_R0, imm); 462 EMIT(PPC_RAW_MULW(dst_reg, src2_reg, _R0)); 463 } 464 break; 465 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */ 466 if (!imm) { 467 PPC_LI32(dst_reg, 0); 468 PPC_LI32(dst_reg_h, 0); 469 } else if (imm == 1) { 470 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 471 EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); 472 } else if (imm == -1) { 473 EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0)); 474 EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h)); 475 } else if (imm > 0 && is_power_of_2(imm)) { 476 imm = ilog2(imm); 477 EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm)); 478 EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31)); 479 EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm)); 480 } else { 481 bpf_set_seen_register(ctx, tmp_reg); 482 PPC_LI32(tmp_reg, imm); 483 EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, tmp_reg)); 484 if (imm < 0) 485 EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, src2_reg)); 486 EMIT(PPC_RAW_MULHWU(_R0, src2_reg, tmp_reg)); 487 
EMIT(PPC_RAW_MULW(dst_reg, src2_reg, tmp_reg)); 488 EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0)); 489 } 490 break; 491 case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */ 492 if (off) 493 EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, src_reg)); 494 else 495 EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg)); 496 break; 497 case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */ 498 if (off) 499 EMIT(PPC_RAW_DIVW(_R0, src2_reg, src_reg)); 500 else 501 EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg)); 502 EMIT(PPC_RAW_MULW(_R0, src_reg, _R0)); 503 EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0)); 504 break; 505 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ 506 return -EOPNOTSUPP; 507 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */ 508 return -EOPNOTSUPP; 509 case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */ 510 if (!imm) 511 return -EINVAL; 512 if (imm == 1) { 513 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 514 } else if (is_power_of_2((u32)imm)) { 515 if (off) 516 EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, ilog2(imm))); 517 else 518 EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm))); 519 } else { 520 PPC_LI32(_R0, imm); 521 if (off) 522 EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, _R0)); 523 else 524 EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0)); 525 } 526 break; 527 case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ 528 if (!imm) 529 return -EINVAL; 530 531 if (!is_power_of_2((u32)imm)) { 532 bpf_set_seen_register(ctx, tmp_reg); 533 PPC_LI32(tmp_reg, imm); 534 if (off) 535 EMIT(PPC_RAW_DIVW(_R0, src2_reg, tmp_reg)); 536 else 537 EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg)); 538 EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0)); 539 EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0)); 540 } else if (imm == 1) { 541 EMIT(PPC_RAW_LI(dst_reg, 0)); 542 } else if (off) { 543 EMIT(PPC_RAW_SRAWI(_R0, src2_reg, ilog2(imm))); 544 EMIT(PPC_RAW_ADDZE(_R0, _R0)); 545 EMIT(PPC_RAW_SLWI(_R0, _R0, ilog2(imm))); 546 EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0)); 547 } else { 548 imm = ilog2((u32)imm); 549 
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31)); 550 } 551 break; 552 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */ 553 if (!imm) 554 return -EINVAL; 555 if (imm < 0) 556 imm = -imm; 557 if (!is_power_of_2(imm)) 558 return -EOPNOTSUPP; 559 if (imm == 1) { 560 EMIT(PPC_RAW_LI(dst_reg, 0)); 561 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 562 } else if (off) { 563 EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31)); 564 EMIT(PPC_RAW_XOR(dst_reg, src2_reg, dst_reg_h)); 565 EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg)); 566 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31)); 567 EMIT(PPC_RAW_XOR(dst_reg, dst_reg, dst_reg_h)); 568 EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg)); 569 EMIT(PPC_RAW_SUBFE(dst_reg_h, dst_reg_h, dst_reg_h)); 570 } else { 571 EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31)); 572 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 573 } 574 break; 575 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */ 576 if (!imm) 577 return -EINVAL; 578 if (!is_power_of_2(abs(imm))) 579 return -EOPNOTSUPP; 580 581 if (imm < 0) { 582 EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0)); 583 EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h)); 584 imm = -imm; 585 src2_reg = dst_reg; 586 } 587 if (imm == 1) { 588 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 589 EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); 590 } else { 591 imm = ilog2(imm); 592 EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31)); 593 EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1)); 594 EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm)); 595 } 596 break; 597 case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */ 598 EMIT(PPC_RAW_NEG(dst_reg, src2_reg)); 599 break; 600 case BPF_ALU64 | BPF_NEG: /* dst = -dst */ 601 EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0)); 602 EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h)); 603 break; 604 605 /* 606 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH 607 */ 608 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */ 609 EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg)); 610 
EMIT(PPC_RAW_AND(dst_reg_h, src2_reg_h, src_reg_h)); 611 break; 612 case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */ 613 EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg)); 614 break; 615 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */ 616 if (imm >= 0) 617 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 618 fallthrough; 619 case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */ 620 if (!IMM_H(imm)) { 621 EMIT(PPC_RAW_ANDI(dst_reg, src2_reg, IMM_L(imm))); 622 } else if (!IMM_L(imm)) { 623 EMIT(PPC_RAW_ANDIS(dst_reg, src2_reg, IMM_H(imm))); 624 } else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(i) - 1)) - 1))) { 625 EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 626 32 - fls(imm), 32 - ffs(imm))); 627 } else { 628 PPC_LI32(_R0, imm); 629 EMIT(PPC_RAW_AND(dst_reg, src2_reg, _R0)); 630 } 631 break; 632 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */ 633 EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg)); 634 EMIT(PPC_RAW_OR(dst_reg_h, src2_reg_h, src_reg_h)); 635 break; 636 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */ 637 EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg)); 638 break; 639 case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */ 640 /* Sign-extended */ 641 if (imm < 0) 642 EMIT(PPC_RAW_LI(dst_reg_h, -1)); 643 fallthrough; 644 case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */ 645 if (IMM_L(imm)) { 646 EMIT(PPC_RAW_ORI(dst_reg, src2_reg, IMM_L(imm))); 647 src2_reg = dst_reg; 648 } 649 if (IMM_H(imm)) 650 EMIT(PPC_RAW_ORIS(dst_reg, src2_reg, IMM_H(imm))); 651 break; 652 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */ 653 if (dst_reg == src_reg) { 654 EMIT(PPC_RAW_LI(dst_reg, 0)); 655 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 656 } else { 657 EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg)); 658 EMIT(PPC_RAW_XOR(dst_reg_h, src2_reg_h, src_reg_h)); 659 } 660 break; 661 case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */ 662 if (dst_reg == src_reg) 663 EMIT(PPC_RAW_LI(dst_reg, 0)); 664 else 665 EMIT(PPC_RAW_XOR(dst_reg, src2_reg, 
src_reg)); 666 break; 667 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */ 668 if (imm < 0) 669 EMIT(PPC_RAW_NOR(dst_reg_h, src2_reg_h, src2_reg_h)); 670 fallthrough; 671 case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */ 672 if (IMM_L(imm)) { 673 EMIT(PPC_RAW_XORI(dst_reg, src2_reg, IMM_L(imm))); 674 src2_reg = dst_reg; 675 } 676 if (IMM_H(imm)) 677 EMIT(PPC_RAW_XORIS(dst_reg, src2_reg, IMM_H(imm))); 678 break; 679 case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */ 680 EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg)); 681 break; 682 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */ 683 bpf_set_seen_register(ctx, tmp_reg); 684 EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32)); 685 EMIT(PPC_RAW_SLW(dst_reg_h, src2_reg_h, src_reg)); 686 EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32)); 687 EMIT(PPC_RAW_SRW(_R0, src2_reg, _R0)); 688 EMIT(PPC_RAW_SLW(tmp_reg, src2_reg, tmp_reg)); 689 EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0)); 690 EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg)); 691 EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg)); 692 break; 693 case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */ 694 if (imm) 695 EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm)); 696 else 697 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 698 break; 699 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */ 700 if (imm < 0) 701 return -EINVAL; 702 if (!imm) { 703 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 704 } else if (imm < 32) { 705 EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm)); 706 EMIT(PPC_RAW_RLWIMI(dst_reg_h, src2_reg, imm, 32 - imm, 31)); 707 EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, imm, 0, 31 - imm)); 708 } else if (imm < 64) { 709 EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg, imm, 0, 31 - imm)); 710 EMIT(PPC_RAW_LI(dst_reg, 0)); 711 } else { 712 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 713 EMIT(PPC_RAW_LI(dst_reg, 0)); 714 } 715 break; 716 case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */ 717 EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg)); 718 break; 719 case BPF_ALU64 | 
BPF_RSH | BPF_X: /* dst >>= src */ 720 bpf_set_seen_register(ctx, tmp_reg); 721 EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32)); 722 EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg)); 723 EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32)); 724 EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0)); 725 EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg)); 726 EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0)); 727 EMIT(PPC_RAW_SRW(dst_reg_h, src2_reg_h, src_reg)); 728 EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg)); 729 break; 730 case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */ 731 if (imm) 732 EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, imm)); 733 else 734 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 735 break; 736 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */ 737 if (imm < 0) 738 return -EINVAL; 739 if (!imm) { 740 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 741 EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); 742 } else if (imm < 32) { 743 EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31)); 744 EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1)); 745 EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, 32 - imm, imm, 31)); 746 } else if (imm < 64) { 747 EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg_h, 64 - imm, imm - 32, 31)); 748 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 749 } else { 750 EMIT(PPC_RAW_LI(dst_reg, 0)); 751 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 752 } 753 break; 754 case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */ 755 EMIT(PPC_RAW_SRAW(dst_reg, src2_reg, src_reg)); 756 break; 757 case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */ 758 bpf_set_seen_register(ctx, tmp_reg); 759 EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32)); 760 EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg)); 761 EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0)); 762 EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32)); 763 EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0)); 764 EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26)); 765 EMIT(PPC_RAW_SRAW(tmp_reg, src2_reg_h, tmp_reg)); 766 EMIT(PPC_RAW_SRAW(dst_reg_h, src2_reg_h, src_reg)); 767 EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0)); 768 
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg)); 769 break; 770 case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */ 771 if (imm) 772 EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, imm)); 773 else 774 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 775 break; 776 case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */ 777 if (imm < 0) 778 return -EINVAL; 779 if (!imm) { 780 EMIT(PPC_RAW_MR(dst_reg, src2_reg)); 781 EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); 782 } else if (imm < 32) { 783 EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31)); 784 EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1)); 785 EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm)); 786 } else if (imm < 64) { 787 EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, imm - 32)); 788 EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31)); 789 } else { 790 EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, 31)); 791 EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31)); 792 } 793 break; 794 795 /* 796 * MOV 797 */ 798 case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */ 799 if (off == 8) { 800 EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); 801 EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31)); 802 } else if (off == 16) { 803 EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); 804 EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31)); 805 } else if (off == 32 && dst_reg == src_reg) { 806 EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31)); 807 } else if (off == 32) { 808 EMIT(PPC_RAW_MR(dst_reg, src_reg)); 809 EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31)); 810 } else if (dst_reg != src_reg) { 811 EMIT(PPC_RAW_MR(dst_reg, src_reg)); 812 EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h)); 813 } 814 break; 815 case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */ 816 /* special mov32 for zext */ 817 if (imm == 1) 818 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 819 else if (off == 8) 820 EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); 821 else if (off == 16) 822 EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); 823 else if (dst_reg != src_reg) 824 EMIT(PPC_RAW_MR(dst_reg, src_reg)); 825 break; 826 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = 
(s64) imm */ 827 PPC_LI32(dst_reg, imm); 828 PPC_EX32(dst_reg_h, imm); 829 break; 830 case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */ 831 PPC_LI32(dst_reg, imm); 832 break; 833 834 /* 835 * BPF_FROM_BE/LE 836 */ 837 case BPF_ALU | BPF_END | BPF_FROM_LE: 838 case BPF_ALU64 | BPF_END | BPF_FROM_LE: 839 switch (imm) { 840 case 16: 841 /* Copy 16 bits to upper part */ 842 EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg, 16, 0, 15)); 843 /* Rotate 8 bits right & mask */ 844 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31)); 845 break; 846 case 32: 847 /* 848 * Rotate word left by 8 bits: 849 * 2 bytes are already in their final position 850 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4) 851 */ 852 EMIT(PPC_RAW_RLWINM(_R0, src2_reg, 8, 0, 31)); 853 /* Rotate 24 bits and insert byte 1 */ 854 EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 0, 7)); 855 /* Rotate 24 bits and insert byte 3 */ 856 EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 16, 23)); 857 EMIT(PPC_RAW_MR(dst_reg, _R0)); 858 break; 859 case 64: 860 bpf_set_seen_register(ctx, tmp_reg); 861 EMIT(PPC_RAW_RLWINM(tmp_reg, src2_reg, 8, 0, 31)); 862 EMIT(PPC_RAW_RLWINM(_R0, src2_reg_h, 8, 0, 31)); 863 /* Rotate 24 bits and insert byte 1 */ 864 EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 0, 7)); 865 EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 0, 7)); 866 /* Rotate 24 bits and insert byte 3 */ 867 EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 16, 23)); 868 EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 16, 23)); 869 EMIT(PPC_RAW_MR(dst_reg, _R0)); 870 EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg)); 871 break; 872 } 873 if (BPF_CLASS(code) == BPF_ALU64 && imm != 64) 874 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 875 break; 876 case BPF_ALU | BPF_END | BPF_FROM_BE: 877 switch (imm) { 878 case 16: 879 /* zero-extend 16 bits into 32 bits */ 880 EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 16, 31)); 881 break; 882 case 32: 883 case 64: 884 /* nop */ 885 break; 886 } 887 break; 888 889 /* 890 * BPF_ST NOSPEC (speculation barrier) 891 */ 892 case BPF_ST | BPF_NOSPEC: 893 break; 894 
895 /* 896 * BPF_ST(X) 897 */ 898 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */ 899 EMIT(PPC_RAW_STB(src_reg, dst_reg, off)); 900 break; 901 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */ 902 PPC_LI32(_R0, imm); 903 EMIT(PPC_RAW_STB(_R0, dst_reg, off)); 904 break; 905 case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */ 906 EMIT(PPC_RAW_STH(src_reg, dst_reg, off)); 907 break; 908 case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */ 909 PPC_LI32(_R0, imm); 910 EMIT(PPC_RAW_STH(_R0, dst_reg, off)); 911 break; 912 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */ 913 EMIT(PPC_RAW_STW(src_reg, dst_reg, off)); 914 break; 915 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */ 916 PPC_LI32(_R0, imm); 917 EMIT(PPC_RAW_STW(_R0, dst_reg, off)); 918 break; 919 case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */ 920 EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off)); 921 EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4)); 922 break; 923 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */ 924 PPC_LI32(_R0, imm); 925 EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4)); 926 PPC_EX32(_R0, imm); 927 EMIT(PPC_RAW_STW(_R0, dst_reg, off)); 928 break; 929 930 /* 931 * BPF_STX ATOMIC (atomic ops) 932 */ 933 case BPF_STX | BPF_ATOMIC | BPF_W: 934 save_reg = _R0; 935 ret_reg = src_reg; 936 937 bpf_set_seen_register(ctx, tmp_reg); 938 bpf_set_seen_register(ctx, ax_reg); 939 940 /* Get offset into TMP_REG */ 941 EMIT(PPC_RAW_LI(tmp_reg, off)); 942 /* 943 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync' 944 * before and after the operation. 945 * 946 * This is a requirement in the Linux Kernel Memory Model. 947 * See __cmpxchg_u32() in asm/cmpxchg.h as an example. 
948 */ 949 if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP)) 950 EMIT(PPC_RAW_SYNC()); 951 tmp_idx = ctx->idx * 4; 952 /* load value from memory into r0 */ 953 EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0)); 954 955 /* Save old value in BPF_REG_AX */ 956 if (imm & BPF_FETCH) 957 EMIT(PPC_RAW_MR(ax_reg, _R0)); 958 959 switch (imm) { 960 case BPF_ADD: 961 case BPF_ADD | BPF_FETCH: 962 EMIT(PPC_RAW_ADD(_R0, _R0, src_reg)); 963 break; 964 case BPF_AND: 965 case BPF_AND | BPF_FETCH: 966 EMIT(PPC_RAW_AND(_R0, _R0, src_reg)); 967 break; 968 case BPF_OR: 969 case BPF_OR | BPF_FETCH: 970 EMIT(PPC_RAW_OR(_R0, _R0, src_reg)); 971 break; 972 case BPF_XOR: 973 case BPF_XOR | BPF_FETCH: 974 EMIT(PPC_RAW_XOR(_R0, _R0, src_reg)); 975 break; 976 case BPF_CMPXCHG: 977 /* 978 * Return old value in BPF_REG_0 for BPF_CMPXCHG & 979 * in src_reg for other cases. 980 */ 981 ret_reg = bpf_to_ppc(BPF_REG_0); 982 983 /* Compare with old value in BPF_REG_0 */ 984 EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0)); 985 /* Don't set if different from old value */ 986 PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4); 987 fallthrough; 988 case BPF_XCHG: 989 save_reg = src_reg; 990 break; 991 default: 992 pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n", 993 code, i); 994 return -EOPNOTSUPP; 995 } 996 997 /* store new value */ 998 EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg)); 999 /* we're done if this succeeded */ 1000 PPC_BCC_SHORT(COND_NE, tmp_idx); 1001 1002 /* For the BPF_FETCH variant, get old data into src_reg */ 1003 if (imm & BPF_FETCH) { 1004 /* Emit 'sync' to enforce full ordering */ 1005 if (IS_ENABLED(CONFIG_SMP)) 1006 EMIT(PPC_RAW_SYNC()); 1007 EMIT(PPC_RAW_MR(ret_reg, ax_reg)); 1008 if (!fp->aux->verifier_zext) 1009 EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */ 1010 } 1011 break; 1012 1013 case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */ 1014 return -EOPNOTSUPP; 1015 1016 /* 1017 * BPF_LDX 1018 */ 1019 case BPF_LDX | BPF_MEM | BPF_B: /* dst 
= *(u8 *)(ul) (src + off) */ 1020 case BPF_LDX | BPF_MEMSX | BPF_B: 1021 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1022 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: 1023 case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ 1024 case BPF_LDX | BPF_MEMSX | BPF_H: 1025 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1026 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: 1027 case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ 1028 case BPF_LDX | BPF_MEMSX | BPF_W: 1029 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1030 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: 1031 case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ 1032 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1033 /* 1034 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid 1035 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM 1036 * load only if addr is kernel address (see is_kernel_addr()), otherwise 1037 * set dst_reg=0 and move on. 1038 */ 1039 if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) { 1040 PPC_LI32(_R0, TASK_SIZE - off); 1041 EMIT(PPC_RAW_CMPLW(src_reg, _R0)); 1042 PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4); 1043 EMIT(PPC_RAW_LI(dst_reg, 0)); 1044 /* 1045 * For BPF_DW case, "li reg_h,0" would be needed when 1046 * !fp->aux->verifier_zext. Emit NOP otherwise. 1047 * 1048 * Note that "li reg_h,0" is emitted for BPF_B/H/W case, 1049 * if necessary. So, jump there instead of emitting an 1050 * additional "li reg_h,0" instruction. 1051 */ 1052 if (size == BPF_DW && !fp->aux->verifier_zext) 1053 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 1054 else 1055 EMIT(PPC_RAW_NOP()); 1056 /* 1057 * Need to jump two instructions instead of one for BPF_DW case 1058 * as there are two load instructions for dst_reg_h & dst_reg 1059 * respectively. 
1060 */ 1061 if (size == BPF_DW || 1062 (size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) 1063 PPC_JMP((ctx->idx + 3) * 4); 1064 else 1065 PPC_JMP((ctx->idx + 2) * 4); 1066 } 1067 1068 if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) { 1069 switch (size) { 1070 case BPF_B: 1071 EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); 1072 EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg)); 1073 break; 1074 case BPF_H: 1075 EMIT(PPC_RAW_LHA(dst_reg, src_reg, off)); 1076 break; 1077 case BPF_W: 1078 EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); 1079 break; 1080 } 1081 if (!fp->aux->verifier_zext) 1082 EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31)); 1083 1084 } else { 1085 switch (size) { 1086 case BPF_B: 1087 EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); 1088 break; 1089 case BPF_H: 1090 EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); 1091 break; 1092 case BPF_W: 1093 EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); 1094 break; 1095 case BPF_DW: 1096 EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off)); 1097 EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4)); 1098 break; 1099 } 1100 if (size != BPF_DW && !fp->aux->verifier_zext) 1101 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 1102 } 1103 1104 if (BPF_MODE(code) == BPF_PROBE_MEM) { 1105 int insn_idx = ctx->idx - 1; 1106 int jmp_off = 4; 1107 1108 /* 1109 * In case of BPF_DW, two lwz instructions are emitted, one 1110 * for higher 32-bit and another for lower 32-bit. So, set 1111 * ex->insn to the first of the two and jump over both 1112 * instructions in fixup. 1113 * 1114 * Similarly, with !verifier_zext, two instructions are 1115 * emitted for BPF_B/H/W case. So, set ex->insn to the 1116 * instruction that could fault and skip over both 1117 * instructions. 
1118 */ 1119 if (size == BPF_DW || !fp->aux->verifier_zext) { 1120 insn_idx -= 1; 1121 jmp_off += 4; 1122 } 1123 1124 ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx, 1125 jmp_off, dst_reg, code); 1126 if (ret) 1127 return ret; 1128 } 1129 break; 1130 1131 /* 1132 * Doubleword load 1133 * 16 byte instruction that uses two 'struct bpf_insn' 1134 */ 1135 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ 1136 PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm); 1137 PPC_LI32(dst_reg, (u32)insn[i].imm); 1138 /* Adjust for two bpf instructions */ 1139 addrs[++i] = ctx->idx * 4; 1140 break; 1141 1142 /* 1143 * Return/Exit 1144 */ 1145 case BPF_JMP | BPF_EXIT: 1146 /* 1147 * If this isn't the very last instruction, branch to 1148 * the epilogue. If we _are_ the last instruction, 1149 * we'll just fall through to the epilogue. 1150 */ 1151 if (i != flen - 1) { 1152 ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr); 1153 if (ret) 1154 return ret; 1155 } 1156 /* else fall through to the epilogue */ 1157 break; 1158 1159 /* 1160 * Call kernel helper or bpf function 1161 */ 1162 case BPF_JMP | BPF_CALL: 1163 ctx->seen |= SEEN_FUNC; 1164 1165 ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass, 1166 &func_addr, &func_addr_fixed); 1167 if (ret < 0) 1168 return ret; 1169 1170 if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) { 1171 EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8)); 1172 EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12)); 1173 } 1174 1175 ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr); 1176 if (ret) 1177 return ret; 1178 1179 EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3)); 1180 EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4)); 1181 break; 1182 1183 /* 1184 * Jumps and branches 1185 */ 1186 case BPF_JMP | BPF_JA: 1187 PPC_JMP(addrs[i + 1 + off]); 1188 break; 1189 case BPF_JMP32 | BPF_JA: 1190 PPC_JMP(addrs[i + 1 + imm]); 1191 break; 1192 1193 case BPF_JMP | BPF_JGT | BPF_K: 1194 case BPF_JMP | BPF_JGT | BPF_X: 1195 case 
BPF_JMP | BPF_JSGT | BPF_K: 1196 case BPF_JMP | BPF_JSGT | BPF_X: 1197 case BPF_JMP32 | BPF_JGT | BPF_K: 1198 case BPF_JMP32 | BPF_JGT | BPF_X: 1199 case BPF_JMP32 | BPF_JSGT | BPF_K: 1200 case BPF_JMP32 | BPF_JSGT | BPF_X: 1201 true_cond = COND_GT; 1202 goto cond_branch; 1203 case BPF_JMP | BPF_JLT | BPF_K: 1204 case BPF_JMP | BPF_JLT | BPF_X: 1205 case BPF_JMP | BPF_JSLT | BPF_K: 1206 case BPF_JMP | BPF_JSLT | BPF_X: 1207 case BPF_JMP32 | BPF_JLT | BPF_K: 1208 case BPF_JMP32 | BPF_JLT | BPF_X: 1209 case BPF_JMP32 | BPF_JSLT | BPF_K: 1210 case BPF_JMP32 | BPF_JSLT | BPF_X: 1211 true_cond = COND_LT; 1212 goto cond_branch; 1213 case BPF_JMP | BPF_JGE | BPF_K: 1214 case BPF_JMP | BPF_JGE | BPF_X: 1215 case BPF_JMP | BPF_JSGE | BPF_K: 1216 case BPF_JMP | BPF_JSGE | BPF_X: 1217 case BPF_JMP32 | BPF_JGE | BPF_K: 1218 case BPF_JMP32 | BPF_JGE | BPF_X: 1219 case BPF_JMP32 | BPF_JSGE | BPF_K: 1220 case BPF_JMP32 | BPF_JSGE | BPF_X: 1221 true_cond = COND_GE; 1222 goto cond_branch; 1223 case BPF_JMP | BPF_JLE | BPF_K: 1224 case BPF_JMP | BPF_JLE | BPF_X: 1225 case BPF_JMP | BPF_JSLE | BPF_K: 1226 case BPF_JMP | BPF_JSLE | BPF_X: 1227 case BPF_JMP32 | BPF_JLE | BPF_K: 1228 case BPF_JMP32 | BPF_JLE | BPF_X: 1229 case BPF_JMP32 | BPF_JSLE | BPF_K: 1230 case BPF_JMP32 | BPF_JSLE | BPF_X: 1231 true_cond = COND_LE; 1232 goto cond_branch; 1233 case BPF_JMP | BPF_JEQ | BPF_K: 1234 case BPF_JMP | BPF_JEQ | BPF_X: 1235 case BPF_JMP32 | BPF_JEQ | BPF_K: 1236 case BPF_JMP32 | BPF_JEQ | BPF_X: 1237 true_cond = COND_EQ; 1238 goto cond_branch; 1239 case BPF_JMP | BPF_JNE | BPF_K: 1240 case BPF_JMP | BPF_JNE | BPF_X: 1241 case BPF_JMP32 | BPF_JNE | BPF_K: 1242 case BPF_JMP32 | BPF_JNE | BPF_X: 1243 true_cond = COND_NE; 1244 goto cond_branch; 1245 case BPF_JMP | BPF_JSET | BPF_K: 1246 case BPF_JMP | BPF_JSET | BPF_X: 1247 case BPF_JMP32 | BPF_JSET | BPF_K: 1248 case BPF_JMP32 | BPF_JSET | BPF_X: 1249 true_cond = COND_NE; 1250 /* fallthrough; */ 1251 1252 cond_branch: 1253 switch (code) { 
1254 case BPF_JMP | BPF_JGT | BPF_X: 1255 case BPF_JMP | BPF_JLT | BPF_X: 1256 case BPF_JMP | BPF_JGE | BPF_X: 1257 case BPF_JMP | BPF_JLE | BPF_X: 1258 case BPF_JMP | BPF_JEQ | BPF_X: 1259 case BPF_JMP | BPF_JNE | BPF_X: 1260 /* unsigned comparison */ 1261 EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h)); 1262 PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); 1263 EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); 1264 break; 1265 case BPF_JMP32 | BPF_JGT | BPF_X: 1266 case BPF_JMP32 | BPF_JLT | BPF_X: 1267 case BPF_JMP32 | BPF_JGE | BPF_X: 1268 case BPF_JMP32 | BPF_JLE | BPF_X: 1269 case BPF_JMP32 | BPF_JEQ | BPF_X: 1270 case BPF_JMP32 | BPF_JNE | BPF_X: 1271 /* unsigned comparison */ 1272 EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); 1273 break; 1274 case BPF_JMP | BPF_JSGT | BPF_X: 1275 case BPF_JMP | BPF_JSLT | BPF_X: 1276 case BPF_JMP | BPF_JSGE | BPF_X: 1277 case BPF_JMP | BPF_JSLE | BPF_X: 1278 /* signed comparison */ 1279 EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h)); 1280 PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); 1281 EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); 1282 break; 1283 case BPF_JMP32 | BPF_JSGT | BPF_X: 1284 case BPF_JMP32 | BPF_JSLT | BPF_X: 1285 case BPF_JMP32 | BPF_JSGE | BPF_X: 1286 case BPF_JMP32 | BPF_JSLE | BPF_X: 1287 /* signed comparison */ 1288 EMIT(PPC_RAW_CMPW(dst_reg, src_reg)); 1289 break; 1290 case BPF_JMP | BPF_JSET | BPF_X: 1291 EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h)); 1292 PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); 1293 EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg)); 1294 break; 1295 case BPF_JMP32 | BPF_JSET | BPF_X: { 1296 EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg)); 1297 break; 1298 case BPF_JMP | BPF_JNE | BPF_K: 1299 case BPF_JMP | BPF_JEQ | BPF_K: 1300 case BPF_JMP | BPF_JGT | BPF_K: 1301 case BPF_JMP | BPF_JLT | BPF_K: 1302 case BPF_JMP | BPF_JGE | BPF_K: 1303 case BPF_JMP | BPF_JLE | BPF_K: 1304 /* 1305 * Need sign-extended load, so only positive 1306 * values can be used as imm in cmplwi 1307 */ 1308 if (imm >= 0 && imm < 32768) { 1309 
EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0)); 1310 PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); 1311 EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); 1312 } else { 1313 /* sign-extending load ... but unsigned comparison */ 1314 PPC_EX32(_R0, imm); 1315 EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0)); 1316 PPC_LI32(_R0, imm); 1317 PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); 1318 EMIT(PPC_RAW_CMPLW(dst_reg, _R0)); 1319 } 1320 break; 1321 case BPF_JMP32 | BPF_JNE | BPF_K: 1322 case BPF_JMP32 | BPF_JEQ | BPF_K: 1323 case BPF_JMP32 | BPF_JGT | BPF_K: 1324 case BPF_JMP32 | BPF_JLT | BPF_K: 1325 case BPF_JMP32 | BPF_JGE | BPF_K: 1326 case BPF_JMP32 | BPF_JLE | BPF_K: 1327 if (imm >= 0 && imm < 65536) { 1328 EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); 1329 } else { 1330 PPC_LI32(_R0, imm); 1331 EMIT(PPC_RAW_CMPLW(dst_reg, _R0)); 1332 } 1333 break; 1334 } 1335 case BPF_JMP | BPF_JSGT | BPF_K: 1336 case BPF_JMP | BPF_JSLT | BPF_K: 1337 case BPF_JMP | BPF_JSGE | BPF_K: 1338 case BPF_JMP | BPF_JSLE | BPF_K: 1339 if (imm >= 0 && imm < 65536) { 1340 EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0)); 1341 PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); 1342 EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); 1343 } else { 1344 /* sign-extending load */ 1345 EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? 
-1 : 0)); 1346 PPC_LI32(_R0, imm); 1347 PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); 1348 EMIT(PPC_RAW_CMPLW(dst_reg, _R0)); 1349 } 1350 break; 1351 case BPF_JMP32 | BPF_JSGT | BPF_K: 1352 case BPF_JMP32 | BPF_JSLT | BPF_K: 1353 case BPF_JMP32 | BPF_JSGE | BPF_K: 1354 case BPF_JMP32 | BPF_JSLE | BPF_K: 1355 /* 1356 * signed comparison, so any 16-bit value 1357 * can be used in cmpwi 1358 */ 1359 if (imm >= -32768 && imm < 32768) { 1360 EMIT(PPC_RAW_CMPWI(dst_reg, imm)); 1361 } else { 1362 /* sign-extending load */ 1363 PPC_LI32(_R0, imm); 1364 EMIT(PPC_RAW_CMPW(dst_reg, _R0)); 1365 } 1366 break; 1367 case BPF_JMP | BPF_JSET | BPF_K: 1368 /* andi does not sign-extend the immediate */ 1369 if (imm >= 0 && imm < 32768) { 1370 /* PPC_ANDI is _only/always_ dot-form */ 1371 EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm)); 1372 } else { 1373 PPC_LI32(_R0, imm); 1374 if (imm < 0) { 1375 EMIT(PPC_RAW_CMPWI(dst_reg_h, 0)); 1376 PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); 1377 } 1378 EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0)); 1379 } 1380 break; 1381 case BPF_JMP32 | BPF_JSET | BPF_K: 1382 /* andi does not sign-extend the immediate */ 1383 if (imm >= 0 && imm < 32768) { 1384 /* PPC_ANDI is _only/always_ dot-form */ 1385 EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm)); 1386 } else { 1387 PPC_LI32(_R0, imm); 1388 EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0)); 1389 } 1390 break; 1391 } 1392 PPC_BCC(true_cond, addrs[i + 1 + off]); 1393 break; 1394 1395 /* 1396 * Tail call 1397 */ 1398 case BPF_JMP | BPF_TAIL_CALL: 1399 ctx->seen |= SEEN_TAILCALL; 1400 ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); 1401 if (ret < 0) 1402 return ret; 1403 break; 1404 1405 default: 1406 /* 1407 * The filter contains something cruel & unusual. 1408 * We don't handle it, but also there shouldn't be 1409 * anything missing from our list. 
1410 */ 1411 pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i); 1412 return -EOPNOTSUPP; 1413 } 1414 if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext && 1415 !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64)) 1416 EMIT(PPC_RAW_LI(dst_reg_h, 0)); 1417 } 1418 1419 /* Set end-of-body-code address for exit. */ 1420 addrs[i] = ctx->idx * 4; 1421 1422 return 0; 1423 } 1424