// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 5*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 16		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers BPF_REG_6 to 10 */
#define BPF_PPC_STACK_SAVE	(5*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

/* BPF register usage */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R8;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R3;
	ctx->b2p[BPF_REG_2] = _R4;
	ctx->b2p[BPF_REG_3] = _R5;
	ctx->b2p[BPF_REG_4] = _R6;
	ctx->b2p[BPF_REG_5] = _R7;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R27;
	ctx->b2p[BPF_REG_7] = _R28;
	ctx->b2p[BPF_REG_8] = _R29;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R31;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R12;
	ctx->b2p[TMP_REG_1] = _R9;
	ctx->b2p[TMP_REG_2] = _R10;
}

/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN		_R27

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
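
/*
 * Offset (from r1) of the save slot for non-volatile GPR 'reg'. The slots
 * sit at the top of our stack frame, or in the caller's redzone when we
 * run frameless -- see the "nv gpr save area" in the layout diagrams above.
 */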
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
		/* this goes in the redzone */
		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
		}

		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
		}
	}
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	EMIT(PPC_RAW_BLR());
}
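
/*
 * Emit a call to a kernel helper at a fixed address. Core kernel text is
 * expected to lie within +/- 2GB of the kernel TOC (this is checked below),
 * so the target can be materialized with a short addis/addi pair relative
 * to r2 rather than a full PPC_LI64() sequence.
 */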
static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
	long reladdr;

	if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
		return -EINVAL;

	reladdr = func_addr - kernel_toc_addr();
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
		return -ERANGE;
	}

	EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
	EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}

int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	if (WARN_ON_ONCE(func && is_module_text_address(func)))
		return -EINVAL;

	/* skip past descriptor if elf v1 */
	func += FUNCTION_DESCR_SIZE;

	/* Load function address into r12 */
	PPC_LI64(_R12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to increase,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	if (!image)
		for (i = ctx->idx - ctx_idx; i < 5; i++)
			EMIT(PPC_RAW_NOP());

	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);
	int bpf_tailcall_prologue_size = 8;

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		bpf_tailcall_prologue_size += 4; /* skip past the toc load */

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
	PPC_BCC_SHORT(COND_EQ, out);

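	/*
	 * Note: branching to bpf_func + FUNCTION_DESCR_SIZE +
	 * bpf_tailcall_prologue_size skips the callee's tail_call_cnt
	 * initialization (and the TOC load on ELF ABI v2), so the count
	 * accumulated so far is preserved across the tail call.
	 */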
	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 size = BPF_SIZE(code);
		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(tmp1_reg, imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(tmp1_reg, imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;

bpf_alu32_trunc:
			/* Truncate to 32-bits */
			if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
			break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
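			/*
			 * The three rotate-and-insert instructions in the 32-bit
			 * case below perform a full byte swap. Illustrative example
			 * (value chosen here for clarity): with dst = 0x11223344,
			 *   rlwinm tmp, dst, 8, 0, 31   -> tmp = 0x22334411
			 *   rlwimi tmp, dst, 24, 0, 7   -> tmp = 0x44334411
			 *   rlwimi tmp, dst, 24, 16, 23 -> tmp = 0x44332211
			 */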
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 64:
				/* Store the value to stack and then use byte-reverse loads */
				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
					EMIT(PPC_RAW_LI(tmp2_reg, 4));
					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
				}
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				ctx->seen |= SEEN_FUNC;
				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(_R12));
				EMIT(PPC_RAW_BCTRL());
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			if (off % 4) {
				EMIT(PPC_RAW_LI(tmp2_reg, off));
				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
			} else {
				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
			}
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
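			/*
			 * Each atomic op below is emitted as a load-reserve/
			 * store-conditional (l[dw]arx / st[dw]cx.) loop: tmp_idx
			 * records the address of the larx so that the conditional
			 * branch after the stcx. can retry the whole sequence if
			 * the reservation is lost.
			 */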
			save_reg = tmp2_reg;
			ret_reg = src_reg;

			/* Get offset into TMP_REG_1 */
			EMIT(PPC_RAW_LI(tmp1_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			if (size == BPF_DW)
				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
			else
				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));

			/* Save old value in _R0 */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(_R0, tmp2_reg));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_R0 */
				if (size == BPF_DW)
					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				else
					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				fallthrough;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			if (size == BPF_DW)
				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
			else
				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			if (imm & BPF_FETCH) {
				EMIT(PPC_RAW_MR(ret_reg, _R0));
				/*
				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
				 * For context, see commit 39491867ace5.
				 */
				if (size != BPF_DW && imm == BPF_CMPXCHG &&
				    insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
			}
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
				else /* BOOK3S_64 */
					PPC_LI64(tmp2_reg, PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned for BPF_DW, because
				 * we might generate two instructions.
				 */
				if (BPF_SIZE(code) == BPF_DW && (off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				if (off % 4) {
					EMIT(PPC_RAW_LI(tmp1_reg, off));
					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
				}
				break;
			}

			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
							    4, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			tmp_idx = ctx->idx;
			PPC_LI64(dst_reg, imm64);
			/* padding to allow full 5 instructions for later patching */
			if (!image)
				for (j = ctx->idx - tmp_idx; j < 5; j++)
					EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			if (ret)
				return ret;

			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
				} else {
					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(tmp1_reg, imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(tmp1_reg, imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
				else {
					PPC_LI32(tmp1_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
								     tmp1_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
									0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					   code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}