/*
 * Just-In-Time compiler for eBPF filters on 32bit ARM
 *
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>
#include <asm/system_info.h>

#include "bpf_jit_32.h"

/*
 * eBPF prog stack layout:
 *
 *                         high
 * original ARM_SP =>     +-----+
 *                        |     | callee saved registers
 *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
 *                        | ... | eBPF JIT scratch space
 * eBPF fp register =>    +-----+
 *   (BPF_FP)             | ... | eBPF prog stack
 *                        +-----+
 *                        |RSVD | JIT scratchpad
 * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
 *                        |     |
 *                        | ... | Function call stack
 *                        |     |
 *                        +-----+
 *                          low
 *
 * The callee saved registers depend on whether frame pointers are enabled.
 * With frame pointers (to be compliant with the ABI):
 *
 *                              high
 * original ARM_SP =>     +--------------+ \
 *                        |      pc      | |
 * current ARM_FP =>      +--------------+ } callee saved registers
 *                        |r4-r9,fp,ip,lr| |
 *                        +--------------+ /
 *                              low
 *
 * Without frame pointers:
 *
 *                              high
 * original ARM_SP =>     +--------------+
 *                        | r4-r9,fp,lr  | callee saved registers
 * current ARM_FP =>      +--------------+
 *                              low
 *
 * When popping registers off the stack at the end of a BPF function, we
 * reference them via the current ARM_FP register.
 */
#define CALLEE_MASK	(1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
			 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
			 1 << ARM_FP)
#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)

enum {
	/* Stack layout - these are offsets from (top of stack - 4) */
	BPF_R2_HI,
	BPF_R2_LO,
	BPF_R3_HI,
	BPF_R3_LO,
	BPF_R4_HI,
	BPF_R4_LO,
	BPF_R5_HI,
	BPF_R5_LO,
	BPF_R7_HI,
	BPF_R7_LO,
	BPF_R8_HI,
	BPF_R8_LO,
	BPF_R9_HI,
	BPF_R9_LO,
	BPF_FP_HI,
	BPF_FP_LO,
	BPF_TC_HI,
	BPF_TC_LO,
	BPF_AX_HI,
	BPF_AX_LO,
	/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
	 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
	 * BPF_REG_FP and the tail call count.
	 */
	BPF_JIT_SCRATCH_REGS,
};

/*
 * Negative "register" values indicate that the register is stored on the
 * stack and are the offset from the top of the eBPF JIT scratch space.
 */
#define STACK_OFFSET(k)	(-4 - (k) * 4)
#define SCRATCH_SIZE	(BPF_JIT_SCRATCH_REGS * 4)

#ifdef CONFIG_FRAME_POINTER
#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
#else
#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
#endif

#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)	/* TEMP Register 1 */
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)	/* TEMP Register 2 */
#define TCALL_CNT	(MAX_BPF_JIT_REG + 2)	/* Tail Call Count */

#define FLAG_IMM_OVERFLOW	(1 << 0)
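/*
 * Editor's note, a worked example of the scratch-space arithmetic above
 * (illustration only): BPF_R2_HI is enum value 0 and BPF_R2_LO is 1, so
 * BPF_REG_2 is spilled at STACK_OFFSET(0) == -4 (high word) and
 * STACK_OFFSET(1) == -8 (low word) relative to the top of the scratch
 * space. The low word therefore sits at the lower address, which is what
 * lets arm_bpf_get_reg64() below fetch both words with a single LDRD from
 * the low-word offset on little-endian ARM.
 */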
/*
 * Map eBPF registers to ARM 32-bit registers or to stack scratch space.
 *
 * 1. The first argument (BPF_REG_1) is held in ARM 32-bit registers; the
 *    remaining arguments are kept in the stack scratch space.
 * 2. The first callee-saved register (BPF_REG_6) is mapped to ARM 32-bit
 *    registers; the remaining callee-saved registers are kept in the
 *    stack scratch space.
 * 3. We need two 64-bit temporary registers to do complex operations on
 *    eBPF registers.
 *
 * As the eBPF registers are all 64 bits wide and ARM has only 32-bit
 * registers, we map each eBPF register to either a pair of ARM 32-bit
 * registers or to scratch memory space, and build the eBPF 64-bit
 * register from those.
 */
static const s8 bpf2a32[][2] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = {ARM_R1, ARM_R0},
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = {ARM_R3, ARM_R2},
	/* Stored on stack scratch space */
	[BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
	[BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
	[BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
	[BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = {ARM_R5, ARM_R4},
	/* Stored on stack scratch space */
	[BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
	[BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
	[BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
	/* Read-only frame pointer to access the BPF stack */
	[BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
	/* Temporary registers for internal BPF JIT; can be used
	 * for constant blinding and other operations.
	 */
	[TMP_REG_1] = {ARM_R7, ARM_R6},
	[TMP_REG_2] = {ARM_R9, ARM_R8},
	/* Tail call count. Stored on stack scratch space. */
	[TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
	/* temporary register for blinding constants.
	 * Stored on stack scratch space.
	 */
	[BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
};

#define dst_lo	dst[1]
#define dst_hi	dst[0]
#define src_lo	src[1]
#define src_hi	src[0]

/*
 * JIT Context:
 *
 * prog			:	bpf_prog
 * idx			:	index of the current last JITed instruction.
 * prologue_bytes	:	bytes used in prologue.
 * epilogue_offset	:	offset where the epilogue starts.
 * offsets		:	array of eBPF instruction offsets in
 *				JITed code.
 * target		:	final JITed code.
 * epilogue_bytes	:	number of bytes used in epilogue.
 * imm_count		:	number of immediate values used for the
 *				literal pool (ARMv6 and earlier).
 * imms			:	array of those immediate values.
 */
struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int prologue_bytes;
	unsigned int epilogue_offset;
	unsigned int cpu_architecture;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u32 stack_size;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};
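/*
 * Editor's note (illustration): throughout this file a mapped eBPF
 * register is an s8[2] pair with the HIGH word in index 0 and the LOW
 * word in index 1, as the bpf2a32[] table and the dst_hi/dst_lo macros
 * above encode. For BPF_REG_0, for example, reg[0] is ARM_R1 (bits
 * 63:32) and reg[1] is ARM_R0 (bits 31:0).
 */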
/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv32(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod32(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

/*
 * This is rather horrid, but necessary to convert an integer constant
 * to an immediate operand for the opcodes, and be able to detect at
 * build time whether the constant can't be converted (iow, usable in
 * BUILD_BUG_ON()).
 */
#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
#define const_imm8m(x)					\
	({ int r;					\
	   u32 v = (x);					\
	   if (!(v & ~0x000000ff))			\
		r = imm12val(v, 0);			\
	   else if (!(v & ~0xc000003f))			\
		r = imm12val(v, 2);			\
	   else if (!(v & ~0xf000000f))			\
		r = imm12val(v, 4);			\
	   else if (!(v & ~0xfc000003))			\
		r = imm12val(v, 6);			\
	   else if (!(v & ~0xff000000))			\
		r = imm12val(v, 8);			\
	   else if (!(v & ~0x3fc00000))			\
		r = imm12val(v, 10);			\
	   else if (!(v & ~0x0ff00000))			\
		r = imm12val(v, 12);			\
	   else if (!(v & ~0x03fc0000))			\
		r = imm12val(v, 14);			\
	   else if (!(v & ~0x00ff0000))			\
		r = imm12val(v, 16);			\
	   else if (!(v & ~0x003fc000))			\
		r = imm12val(v, 18);			\
	   else if (!(v & ~0x000ff000))			\
		r = imm12val(v, 20);			\
	   else if (!(v & ~0x0003fc00))			\
		r = imm12val(v, 22);			\
	   else if (!(v & ~0x0000ff00))			\
		r = imm12val(v, 24);			\
	   else if (!(v & ~0x00003fc0))			\
		r = imm12val(v, 26);			\
	   else if (!(v & ~0x00000ff0))			\
		r = imm12val(v, 28);			\
	   else if (!(v & ~0x000003fc))			\
		r = imm12val(v, 30);			\
	   else						\
		r = -1;					\
	   r; })

/*
 * Check whether an immediate value can be encoded as an ARM "modified
 * immediate": an 8-bit value rotated right by an even amount, packed into
 * a 12-bit operand (4-bit rotation field, 8-bit immediate). Returns the
 * 12-bit encoding, or -1 if the value cannot be encoded.
 */
static int imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);
	return -1;
}

#define imm8m(x)	(__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
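/*
 * Editor's note, a worked example (illustration only): imm8m(0xff000000)
 * succeeds with rot == 4, since ror32(0xff, 8) == 0xff000000 covers the
 * value. The returned encoding is rol32(0xff000000, 8) | (4 << 8) ==
 * 0x4ff, i.e. immediate 0xff with a rotate-right-by-8 field, exactly the
 * 12-bit operand the ARM data-processing instructions take.
 */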
static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
{
	op |= rt << 12 | rn << 16;
	if (imm12 >= 0)
		op |= ARM_INST_LDST__U;
	else
		imm12 = -imm12;
	return op | (imm12 & ARM_INST_LDST__IMM12);
}

static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
{
	op |= rt << 12 | rn << 16;
	if (imm8 >= 0)
		op |= ARM_INST_LDST__U;
	else
		imm8 = -imm8;
	return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
}

#define ARM_LDR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
#define ARM_LDRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
#define ARM_LDRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
#define ARM_LDRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)

#define ARM_STR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
#define ARM_STRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
#define ARM_STRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
#define ARM_STRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)

/*
 * Initializes the JIT space with undefined instructions.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
/* EABI requires the stack to be aligned to 64-bit boundaries */
#define STACK_ALIGNMENT	8
#else
/* Stack must be aligned to 32-bit boundaries */
#define STACK_ALIGNMENT	4
#endif

/* total stack size used in JITed code */
#define _STACK_SIZE	(ctx->prog->aux->stack_depth + SCRATCH_SIZE)
#define STACK_SIZE	ALIGN(_STACK_SIZE, STACK_ALIGNMENT)

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned int i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->prog->len - 1] * 4;
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * literal pool is too far, signal it into flags. we
		 * can only detect it on the second pass unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */

static inline int bpf2a32_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to, from;

	if (ctx->target == NULL)
		return 0;
	to = ctx->offsets[bpf_to];
	from = ctx->offsets[bpf_from];

	return to - from - 1;
}
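/*
 * Editor's note on the "- 1" above (illustration): ctx->offsets[i] holds
 * the index of the first ARM instruction *after* eBPF instruction i, and
 * an ARM branch encodes a word offset relative to PC, which reads as the
 * branch's own index + 2. With the branch emitted as the last instruction
 * of "bpf_from" (index from - 1), the needed offset is
 * to - ((from - 1) + 2) == to - from - 1.
 */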
/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
	emit_bx_r(tgt_reg, ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to, from;
	/* No need for 1st dummy run */
	if (ctx->target == NULL)
		return 0;
	to = ctx->epilogue_offset;
	from = ctx->idx;

	return to - from - 2;
}

static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];

#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions:
	 * as ARM_R1 and ARM_R0 contain the first argument of the bpf
	 * function, we need to save them on the caller side to keep
	 * them from getting clobbered within the callee.
	 * After the return from the callee, we restore ARM_R0 and
	 * ARM_R1.
	 */
	if (rn != ARM_R1) {
		emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	}
	if (rm != ARM_R0) {
		emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	}

	/* Call appropriate function */
	emit_mov_i(ARM_IP, op == BPF_DIV ?
		   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Save return value */
	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);

	/* Restore ARM_R0 and ARM_R1 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}
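/*
 * Editor's note (illustration): in the HWCAP_IDIVA path above, MLS
 * derives the remainder from the quotient already in IP:
 * rd = rm - rn * ip, i.e. rd = rm - rn * (rm / rn) == rm % rn. The
 * software fallback reaches the same result by calling jit_mod32()
 * instead.
 */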
/* Is the translated BPF register on stack? */
static bool is_stacked(s8 reg)
{
	return reg < 0;
}

/* If a BPF register is on the stack (i.e. negative), load it into the
 * supplied temporary register and return the temporary register for
 * subsequent operations; otherwise just use the CPU register.
 */
static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
{
	if (is_stacked(reg)) {
		emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
		reg = tmp;
	}
	return reg;
}

static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
				   struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_LDRD_I(tmp[1], ARM_FP,
					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_LDR_I(tmp[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_LDR_I(tmp[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
		reg = tmp;
	}
	return reg;
}

/* If a BPF register is on the stack (i.e. negative), save the register
 * back to the stack. If the source register is not the same, then
 * move it into the correct register.
 */
static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
{
	if (is_stacked(reg))
		emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
	else if (reg != src)
		emit(ARM_MOV_R(reg, src), ctx);
}

static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
			      struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_STRD_I(src[1], ARM_FP,
					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_STR_I(src[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_STR_I(src[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
	} else {
		if (reg[1] != src[1])
			emit(ARM_MOV_R(reg[1], src[1]), ctx);
		if (reg[0] != src[0])
			emit(ARM_MOV_R(reg[0], src[0]), ctx);
	}
}

static inline void emit_a32_mov_i(const s8 dst, const u32 val,
				  struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];

	if (is_stacked(dst)) {
		emit_mov_i(tmp[1], val, ctx);
		arm_bpf_put_reg32(dst, tmp[1], ctx);
	} else {
		emit_mov_i(dst, val, ctx);
	}
}

static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;

	emit_mov_i(rd[1], (u32)val, ctx);
	emit_mov_i(rd[0], val >> 32, ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}
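/*
 * Editor's note (illustration): the LDRD/STRD fast path above transfers
 * both halves of a spilled 64-bit register in one instruction. LDRD/STRD
 * operate on an even/odd register pair (Rt, Rt+1), which appears to be
 * why the temporaries in bpf2a32[] are laid out as {R7, R6} and {R9, R8}:
 * the low word lands in the even register and the high word in the odd
 * one, matching the low-then-high layout in the scratch space.
 */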
/* Sign extended move */
static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
				       const u32 val, struct jit_ctx *ctx)
{
	u64 val64 = val;

	if (is64 && (val & (1 << 31)))
		val64 |= 0xffffffff00000000ULL;
	emit_a32_mov_i64(dst, val64, ctx);
}

static inline void emit_a32_add_r(const u8 dst, const u8 src,
				  const bool is64, const bool hi,
				  struct jit_ctx *ctx)
{
	/* 64 bit :
	 *	adds dst_lo, dst_lo, src_lo
	 *	adc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	add dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_ADDS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_ADC_R(dst, dst, src), ctx);
	else
		emit(ARM_ADD_R(dst, dst, src), ctx);
}

static inline void emit_a32_sub_r(const u8 dst, const u8 src,
				  const bool is64, const bool hi,
				  struct jit_ctx *ctx)
{
	/* 64 bit :
	 *	subs dst_lo, dst_lo, src_lo
	 *	sbc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	sub dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_SUBS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_SBC_R(dst, dst, src), ctx);
	else
		emit(ARM_SUB_R(dst, dst, src), ctx);
}

static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
			      const bool hi, const u8 op, struct jit_ctx *ctx)
{
	switch (BPF_OP(op)) {
	/* dst = dst + src */
	case BPF_ADD:
		emit_a32_add_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit_a32_sub_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst | src */
	case BPF_OR:
		emit(ARM_ORR_R(dst, dst, src), ctx);
		break;
	/* dst = dst & src */
	case BPF_AND:
		emit(ARM_AND_R(dst, dst, src), ctx);
		break;
	/* dst = dst ^ src */
	case BPF_XOR:
		emit(ARM_EOR_R(dst, dst, src), ctx);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		emit(ARM_MUL(dst, dst, src), ctx);
		break;
	/* dst = dst << src */
	case BPF_LSH:
		emit(ARM_LSL_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ARM_LSR_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ARSH:
		emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
		break;
	}
}

/* ALU operation (32 bit)
 * dst = dst (op) src
 */
static inline void emit_a32_alu_r(const s8 dst, const s8 src,
				  struct jit_ctx *ctx, const bool is64,
				  const bool hi, const u8 op)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rn, rd;

	rn = arm_bpf_get_reg32(src, tmp[1], ctx);
	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
	/* ALU operation */
	emit_alu_r(rd, rn, is64, hi, op, ctx);
	arm_bpf_put_reg32(dst, rd, ctx);
}

/* ALU operation (64 bit) */
static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
				    const s8 src[], struct jit_ctx *ctx,
				    const u8 op)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	if (is64) {
		const s8 *rs;

		rs = arm_bpf_get_reg64(src, tmp2, ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs[1], true, false, op, ctx);
		emit_alu_r(rd[0], rs[0], true, true, op, ctx);
	} else {
		s8 rs;

		rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs, true, false, op, ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}
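/*
 * Editor's note (illustration): emit_a32_alu_r64() deliberately emits the
 * low-half operation first. For 64-bit BPF_ADD/BPF_SUB that makes the
 * flag-setting ADDS/SUBS run before the ADC/SBC on the high half, so the
 * carry/borrow propagates correctly; for the other ops the order is
 * immaterial.
 */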
/* dst = src (4 bytes) */
static inline void emit_a32_mov_r(const s8 dst, const s8 src,
				  struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rt;

	rt = arm_bpf_get_reg32(src, tmp[0], ctx);
	arm_bpf_put_reg32(dst, rt, ctx);
}

/* dst = src */
static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
				    const s8 src[],
				    struct jit_ctx *ctx)
{
	if (!is64) {
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		if (!ctx->prog->aux->verifier_zext)
			/* Zero out high 4 bytes */
			emit_a32_mov_i(dst_hi, 0, ctx);
	} else if (__LINUX_ARM_ARCH__ < 6 &&
		   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
		/* complete 8 byte move */
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		emit_a32_mov_r(dst_hi, src_hi, ctx);
	} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
		const u8 *tmp = bpf2a32[TMP_REG_1];

		emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
		emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else if (is_stacked(src_lo)) {
		emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
	} else if (is_stacked(dst_lo)) {
		emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else {
		emit(ARM_MOV_R(dst[0], src[0]), ctx);
		emit(ARM_MOV_R(dst[1], src[1]), ctx);
	}
}

/* Shift operations (BPF_NEG is handled here too, via RSB with 0) */
static inline void emit_a32_alu_i(const s8 dst, const u32 val,
				  struct jit_ctx *ctx, const u8 op)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);

	/* Do shift operation */
	switch (op) {
	case BPF_LSH:
		emit(ARM_LSL_I(rd, rd, val), ctx);
		break;
	case BPF_RSH:
		emit(ARM_LSR_I(rd, rd, val), ctx);
		break;
	case BPF_NEG:
		emit(ARM_RSB_I(rd, rd, val), ctx);
		break;
	}

	arm_bpf_put_reg32(dst, rd, ctx);
}

/* dst = -dst (64 bit) */
static inline void emit_a32_neg64(const s8 dst[],
				  struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd;

	/* Setup Operand */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do Negate Operation */
	emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
	emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst << src */
static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
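/*
 * Editor's note, a trace of the branchless 64-bit left shift above
 * (illustration only): with rt holding the shift amount, the sequence is
 *
 *	ip  = rt - 32
 *	t   = 32 - rt
 *	lr  = hi << rt
 *	lr |= lo << (rt - 32)		; nonzero only when rt >= 32
 *	ip  = lr | (lo >> (32 - rt))	; carries lo's top bits into hi
 *	lr  = lo << rt
 *
 * It relies on ARM register-specified shifts taking the amount from the
 * bottom byte of the register, with amounts of 32-255 producing zero for
 * LSL/LSR: when rt < 32, (rt - 32) maps to a byte value >= 224 and that
 * term vanishes; when rt >= 32, (32 - rt) vanishes instead and
 * lo << (rt - 32) supplies the new high word. emit_a32_rsh_r64() below
 * mirrors the construction; the arithmetic variant additionally needs one
 * conditional branch, since ASR would smear the sign bit instead of
 * producing zero.
 */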
/* dst = dst >> src (signed) */
static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
				     struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do the ARSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	/* If the shift amount is < 32 (SUBS above set N), skip the next
	 * instruction: ASR by a "negative" amount would smear the sign
	 * bit into lr.
	 */
	_emit(ARM_COND_MI, ARM_B(0), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst >> src */
static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do RSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst << val */
static inline void emit_a32_lsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
		emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
	} else {
		if (val == 32)
			emit(ARM_MOV_R(rd[0], rd[1]), ctx);
		else
			emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
		emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst >> val */
static inline void emit_a32_rsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSR operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}
/* dst = dst >> val (signed) */
static inline void emit_a32_arsh_i64(const s8 dst[],
				     const u32 val, struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do ARSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd, *rt;

	/* Setup operands for multiplication */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	rt = arm_bpf_get_reg64(src, tmp2, ctx);

	/* 64x64 -> 64 multiply from 32-bit parts:
	 * dst = ((hi1*lo2 + lo1*hi2) << 32) + lo1*lo2, truncated to 64 bit
	 */
	emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);	/* ip = lo1 * hi2 */
	emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);	/* lr = hi1 * lo2 */
	emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);	/* lr = cross terms */

	emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx); /* rd[0]:ip = lo1 * lo2 */
	emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);	/* fold cross terms into hi */

	arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
	arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}

/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
			      s32 off, struct jit_ctx *ctx, const u8 sz)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s32 off_max;
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[1], ctx);

	if (sz == BPF_H)
		off_max = 0xff;
	else
		off_max = 0xfff;

	if (off < 0 || off > off_max) {
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
		rd = tmp[0];
		off = 0;
	}
	switch (sz) {
	case BPF_B:
		/* Store a Byte */
		emit(ARM_STRB_I(src_lo, rd, off), ctx);
		break;
	case BPF_H:
		/* Store a HalfWord */
		emit(ARM_STRH_I(src_lo, rd, off), ctx);
		break;
	case BPF_W:
		/* Store a Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		break;
	case BPF_DW:
		/* Store a Double Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
		break;
	}
}
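/*
 * Editor's note (illustration): the off_max checks in emit_str_r() above
 * and emit_ldx_r() below mirror the ARM addressing modes in use: halfword
 * accesses (LDRH/STRH) carry only a split 8-bit immediate, everything
 * else a 12-bit one. Out-of-range offsets are first materialized into a
 * temporary and added to the base. Note that BPF_DW is handled as two
 * word accesses at off and off + 4, so it relies on off + 4 still fitting
 * the 12-bit encoding.
 */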
/* dst = *(size *)(src + off) */
static inline void emit_ldx_r(const s8 dst[], const s8 src,
			      s32 off, struct jit_ctx *ctx, const u8 sz)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
	s8 rm = src;
	s32 off_max;

	if (sz == BPF_H)
		off_max = 0xff;
	else
		off_max = 0xfff;

	if (off < 0 || off > off_max) {
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
		rm = tmp[0];
		off = 0;
	} else if (rd[1] == rm) {
		emit(ARM_MOV_R(tmp[0], rm), ctx);
		rm = tmp[0];
	}
	switch (sz) {
	case BPF_B:
		/* Load a Byte */
		emit(ARM_LDRB_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_H:
		/* Load a HalfWord */
		emit(ARM_LDRH_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_W:
		/* Load a Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_DW:
		/* Load a Double Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
		break;
	}
	arm_bpf_put_reg64(dst, rd, ctx);
}
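/*
 * Editor's note on the signed 64-bit compares in emit_ar_r() below
 * (illustration): a CMP on the low words followed by SBCS on the high
 * words computes a full 64-bit subtraction, leaving the N and V flags
 * describing the signed result. BPF_JSGE and BPF_JSLT subtract src from
 * dst; BPF_JSGT and BPF_JSLE swap the operands and compute src - dst
 * instead, which is why they branch on the "inverted" LT/GE conditions
 * in build_insn() further down.
 */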
/* Arithmetic operation */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
			     const u8 rn, struct jit_ctx *ctx, u8 op,
			     bool is_jmp64)
{
	switch (op) {
	case BPF_JSET:
		if (is_jmp64) {
			emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
			emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
			emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
		} else {
			emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
		}
		break;
	case BPF_JEQ:
	case BPF_JNE:
	case BPF_JGT:
	case BPF_JGE:
	case BPF_JLE:
	case BPF_JLT:
		if (is_jmp64) {
			emit(ARM_CMP_R(rd, rm), ctx);
			/* Only compare the low halves if the high halves
			 * are equal.
			 */
			_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
		} else {
			emit(ARM_CMP_R(rt, rn), ctx);
		}
		break;
	case BPF_JSLE:
	case BPF_JSGT:
		emit(ARM_CMP_R(rn, rt), ctx);
		if (is_jmp64)
			emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
		break;
	case BPF_JSLT:
	case BPF_JSGE:
		emit(ARM_CMP_R(rt, rn), ctx);
		if (is_jmp64)
			emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
		break;
	}
}

static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{

	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const s8 *r2 = bpf2a32[BPF_REG_2];
	const s8 *r3 = bpf2a32[BPF_REG_3];
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *tcc = bpf2a32[TCALL_CNT];
	const s8 *tc;
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset) - 2)
	u32 lo, hi;
	s8 r_array, r_index;
	int off;

	/* if (index >= array->map.max_entries)
	 *	goto out;
	 */
	BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
		     ARM_INST_LDST__IMM12);
	off = offsetof(struct bpf_array, map.max_entries);
	r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
	/* index is 32-bit for arrays */
	r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
	/* array->map.max_entries */
	emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
	/* index >= array->map.max_entries */
	emit(ARM_CMP_R(r_index, tmp[1]), ctx);
	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);

	/* tmp2[0] = array, tmp2[1] = index */

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 * tail_call_cnt++;
	 */
	lo = (u32)MAX_TAIL_CALL_CNT;
	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
	tc = arm_bpf_get_reg64(tcc, tmp, ctx);
	emit(ARM_CMP_I(tc[0], hi), ctx);
	_emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
	_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
	emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
	emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
	arm_bpf_put_reg64(tcc, tmp, ctx);

	/* prog = array->ptrs[index]
	 * if (prog == NULL)
	 *	goto out;
	 */
	BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
	off = imm8m(offsetof(struct bpf_array, ptrs));
	emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
	emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
	emit(ARM_CMP_I(tmp[1], 0), ctx);
	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
		     ARM_INST_LDST__IMM12);
	off = offsetof(struct bpf_prog, bpf_func);
	emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
	emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
	emit_bx_r(tmp[1], ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}
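/*
 * Editor's note (illustration): the forward branches to "out" above are
 * emitted before out_offset is known; the first build_body() pass records
 * where "out" lands, and every later pass must emit exactly the same
 * number of instructions for that recorded offset to stay valid. The
 * cur_offset != out_offset check is what catches a divergence between
 * passes.
 */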
/* 0xabcd => 0xcdab */
static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const s8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
#else /* ARMv6+ */
	emit(ARM_REV16(rd, rn), ctx);
#endif
}

/* 0xabcdefgh => 0xghefcdab */
static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const s8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
	emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);

	emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
	emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
	emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);

#else /* ARMv6+ */
	emit(ARM_REV(rd, rn), ctx);
#endif
}

// push the 64-bit source register pair onto the stack
static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
{
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rt;
	u16 reg_set = 0;

	rt = arm_bpf_get_reg64(src, tmp2, ctx);

	reg_set = (1 << rt[1]) | (1 << rt[0]);
	emit(ARM_PUSH(reg_set), ctx);
}

static void build_prologue(struct jit_ctx *ctx)
{
	const s8 r0 = bpf2a32[BPF_REG_0][1];
	const s8 r2 = bpf2a32[BPF_REG_1][1];
	const s8 r3 = bpf2a32[BPF_REG_1][0];
	const s8 r4 = bpf2a32[BPF_REG_6][1];
	const s8 fplo = bpf2a32[BPF_REG_FP][1];
	const s8 fphi = bpf2a32[BPF_REG_FP][0];
	const s8 *tcc = bpf2a32[TCALL_CNT];

	/* Save callee saved registers. */
#ifdef CONFIG_FRAME_POINTER
	u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
	emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
#endif
	/* Compute the BPF frame pointer value (just below the scratch
	 * space) and keep it in IP for later.
	 */
	emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);

	ctx->stack_size = imm8m(STACK_SIZE);

	/* Set up function call stack */
	emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);

	/* Set up BPF prog stack base register */
	emit_a32_mov_r(fplo, ARM_IP, ctx);
	emit_a32_mov_i(fphi, 0, ctx);

	/* mov r4, 0 */
	emit(ARM_MOV_I(r4, 0), ctx);

	/* Move BPF_CTX to BPF_R1 */
	emit(ARM_MOV_R(r3, r4), ctx);
	emit(ARM_MOV_R(r2, r0), ctx);
	/* Initialize tail call count */
	emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[0])), ctx);
	emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[1])), ctx);
	/* end of prologue */
}
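/*
 * Editor's note (illustration): build_prologue() materializes the layout
 * pictured at the top of this file. Taking the CONFIG_FRAME_POINTER
 * variant: IP snapshots the original SP, the PUSH stores r4-r9,fp,ip,lr,pc,
 * and ARM_FP then points into that save area. The BPF frame pointer handed
 * to the program (fplo/fphi) is the post-push SP minus SCRATCH_SIZE, i.e.
 * the boundary labelled (BPF_FP) in the diagram, with the JIT scratch
 * words directly above it.
 */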
/* restore callee saved registers. */
static void build_epilogue(struct jit_ctx *ctx)
{
#ifdef CONFIG_FRAME_POINTER
	/* When using frame pointers, some additional registers need to
	 * be loaded.
	 */
	u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
	emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	/* Restore callee saved registers. */
	emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
	emit(ARM_POP(CALLEE_POP_MASK), ctx);
#endif
}

/*
 * Convert an eBPF instruction to native instruction, i.e.
 * JITs an eBPF instruction.
 * Returns :
 *	0  - Successfully JITed an 8-byte eBPF instruction
 *	>0 - Successfully JITed a 16-byte eBPF instruction
 *	<0 - Failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const s8 *dst = bpf2a32[insn->dst_reg];
	const s8 *src = bpf2a32[insn->src_reg];
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	const s8 *rd, *rs;
	s8 rd_lo, rt, rm, rn;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((imm) >= (1 << ((bits) - 1)) ||			\
	    (imm) < -(1 << ((bits) - 1))) {			\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm24(imm) check_imm(24, imm)

	switch (code) {
	/* ALU operations */

	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			if (imm == 1) {
				/* Special mov32 for zext */
				emit_a32_mov_i(dst_hi, 0, ctx);
				break;
			}
			emit_a32_mov_r64(is64, dst, src, ctx);
			break;
		case BPF_K:
			/* Sign-extend immediate value to destination reg */
			emit_a32_mov_se_i64(is64, dst, imm, ctx);
			break;
		}
		break;
	/* dst = dst + src/imm */
	/* dst = dst - src/imm */
	/* dst = dst | src/imm */
	/* dst = dst & src/imm */
	/* dst = dst ^ src/imm */
	/* dst = dst * src/imm */
	/* dst = dst << src */
	/* dst = dst >> src */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
			break;
		case BPF_K:
			/* Move the immediate value into a temporary
			 * register, sign-extending it, and then do the
			 * ALU operation on that register, which is safe.
			 */
			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
			emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
			break;
		}
		break;
	/* dst = dst / src(imm) */
	/* dst = dst % src(imm) */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_X:
		rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
		switch (BPF_SRC(code)) {
		case BPF_X:
			rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
			break;
		case BPF_K:
			rt = tmp2[0];
			emit_a32_mov_i(rt, imm, ctx);
			break;
		default:
			rt = src_lo;
			break;
		}
		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
		arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		goto notyet;
	/* dst = dst >> imm */
	/* dst = dst << imm */
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU | BPF_LSH | BPF_K:
		if (unlikely(imm > 31))
			return -EINVAL;
		if (imm)
			emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
		break;
	/* dst = dst << imm */
	case BPF_ALU64 | BPF_LSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_lsh_i64(dst, imm, ctx);
		break;
	/* dst = dst >> imm */
	case BPF_ALU64 | BPF_RSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_rsh_i64(dst, imm, ctx);
		break;
	/* dst = dst << src */
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_a32_lsh_r64(dst, src, ctx);
		break;
	/* dst = dst >> src */
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_a32_rsh_r64(dst, src, ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_a32_arsh_r64(dst, src, ctx);
		break;
	/* dst = dst >> imm (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_arsh_i64(dst, imm, ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
		emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
		break;
	/* dst = -dst (64 bit) */
	case BPF_ALU64 | BPF_NEG:
		emit_a32_neg64(dst, ctx);
		break;
	/* dst = dst * src/imm */
	case BPF_ALU64 | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_mul_r64(dst, src, ctx);
			break;
		case BPF_K:
			/* Move the immediate value into a temporary
			 * register, sign-extending it, and then do the
			 * multiplication on that register, which is safe.
			 */
			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
			emit_a32_mul_r64(dst, tmp2, ctx);
			break;
		}
		break;
	/* dst = htole(dst) */
	/* dst = htobe(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		rd = arm_bpf_get_reg64(dst, tmp, ctx);
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
		switch (imm) {
		case 16:
			emit_rev16(rd[1], rd[1], ctx);
			goto emit_bswap_uxt;
		case 32:
			emit_rev32(rd[1], rd[1], ctx);
			goto emit_bswap_uxt;
		case 64:
			emit_rev32(ARM_LR, rd[1], ctx);
			emit_rev32(rd[1], rd[0], ctx);
			emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
			break;
		}
		goto exit;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
#if __LINUX_ARM_ARCH__ < 6
			emit_a32_mov_i(tmp2[1], 0xffff, ctx);
			emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
#else /* ARMv6+ */
			emit(ARM_UXTH(rd[1], rd[1]), ctx);
#endif
			if (!ctx->prog->aux->verifier_zext)
				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			if (!ctx->prog->aux->verifier_zext)
				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
exit:
		arm_bpf_put_reg64(dst, rd, ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		u64 val = (u32)imm | (u64)insn[1].imm << 32;

		emit_a32_mov_i64(dst, val, ctx);

		return 1;
	}
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
		emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_DW:
			/* Sign-extend immediate value into temp reg */
			emit_a32_mov_se_i64(true, tmp2, imm, ctx);
			break;
		case BPF_W:
		case BPF_H:
		case BPF_B:
			emit_a32_mov_i(tmp2[1], imm, ctx);
			break;
		}
		emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		rs = arm_bpf_get_reg64(src, tmp2, ctx);
		emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
		break;
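	/* Editor's note (illustration): the BPF_FROM_LE path above jumps
	 * straight to emit_bswap_uxt because this JIT appears to assume a
	 * little-endian host, where a to-LE conversion needs no byte
	 * swapping; only the zero-extension to 16/32 bits demanded by the
	 * BPF semantics remains. BPF_FROM_BE does the REV/REV16 swap first
	 * and then shares the same zero-extension tail.
	 */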
	/* PC += off if dst == src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst != src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
	/* PC += off if dst & src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		/* Setup source registers */
		rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
		goto go_jmp;
	/* PC += off if dst == imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst != imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (off == 0)
			break;
		rm = tmp2[0];
		rn = tmp2[1];
		/* Sign-extend immediate value */
		emit_a32_mov_se_i64(true, tmp2, imm, ctx);
go_jmp:
		/* Setup destination register */
		rd = arm_bpf_get_reg64(dst, tmp, ctx);

		/* Check for the condition */
		emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
			  BPF_CLASS(code) == BPF_JMP);

		/* Setup JUMP instruction */
		jmp_offset = bpf2a32_offset(i + off, i, ctx);
		switch (BPF_OP(code)) {
		case BPF_JNE:
		case BPF_JSET:
			_emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JEQ:
			_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGT:
			_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGE:
			_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSGT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSGE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLE:
			_emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLT:
			_emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		}
		break;
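	/* Editor's note (illustration): the unsigned compares above map
	 * directly to ARM conditions (HI/CS/LS/CC), while BPF_JSGT and
	 * BPF_JSLE use the seemingly inverted LT/GE because emit_ar_r()
	 * computed src - dst for them rather than dst - src: dst > src
	 * (signed) is exactly src - dst < 0.
	 */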
	/* JMP OFF */
	case BPF_JMP | BPF_JA:
	{
		if (off == 0)
			break;
		jmp_offset = bpf2a32_offset(i + off, i, ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const s8 *r0 = bpf2a32[BPF_REG_0];
		const s8 *r1 = bpf2a32[BPF_REG_1];
		const s8 *r2 = bpf2a32[BPF_REG_2];
		const s8 *r3 = bpf2a32[BPF_REG_3];
		const s8 *r4 = bpf2a32[BPF_REG_4];
		const s8 *r5 = bpf2a32[BPF_REG_5];
		const u32 func = (u32)__bpf_call_base + (u32)imm;

		emit_a32_mov_r64(true, r0, r1, ctx);
		emit_a32_mov_r64(true, r1, r2, ctx);
		emit_push_r64(r5, ctx);
		emit_push_r64(r4, ctx);
		emit_push_r64(r3, ctx);

		emit_a32_mov_i(tmp[1], func, ctx);
		emit_blx_r(tmp[1], ctx);

		/* drop the three 64-bit arguments pushed above */
		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx);
		break;
	}
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when the last instruction is EXIT,
		 * simply fall through to the epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	if (ctx->flags & FLAG_IMM_OVERFLOW)
		/*
		 * this instruction generated an overflow when
		 * trying to access the literal pool, so
		 * delegate this filter to the kernel interpreter.
		 */
		return -1;
	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	unsigned int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &(prog->insnsi[i]);
		int ret;

		ret = build_insn(insn, ctx);

		/* ret > 0 means the 64-bit immediate load consumed two
		 * instruction slots.
		 */
		if (ret > 0) {
			i++;
			if (ctx->target == NULL)
				ctx->offsets[i] = ctx->idx;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx;

		/* If unsuccessful, return with error code */
		if (ret)
			return ret;
	}
	return 0;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
			return -1;
	}

	return 0;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

bool bpf_jit_needs_zext(void)
{
	return true;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	unsigned int tmp_idx;
	unsigned int image_size;
	u8 *image_ptr;

	/* If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	/* If constant blinding was enabled and we failed during blinding
	 * then we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	tmp = bpf_jit_blind_constants(prog);

	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;
	ctx.cpu_architecture = cpu_architecture();

	/* If we cannot allocate memory for offsets[], we must fall back
	 * to the interpreter.
	 */
	ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offsets == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1) fake pass to find the length of the JITed code, to compute
	 *    ctx->offsets and the other context variables needed to
	 *    compute the final JITed code.
	 *    Also, calculate the random starting pointer/start of the
	 *    JITed code, which is prefixed by a random number of fault
	 *    instructions.
	 *
	 * If the first pass fails then there is no chance of it
	 * being successful in the second pass, so just fall back
	 * to the interpreter.
	 */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.epilogue_offset = ctx.idx;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
		if (ctx.imms == NULL) {
			prog = orig_prog;
			goto out_off;
		}
	}
#else
	/* on ARMv7 there is no literal pool to place after the epilogue */
	build_epilogue(&ctx);
#endif
	/* Now we can get the actual image size of the JITed arm code.
	 * Currently, we are not considering the THUMB-2 instructions
	 * for jit, although it can decrease the size of the image.
	 *
	 * As each arm instruction is 32 bits long, we translate the
	 * number of JITed instructions into the size required to store
	 * the JITed code.
	 */
	image_size = sizeof(u32) * ctx.idx;

	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	/* If we cannot allocate memory for the structure, we must fall
	 * back to the interpreter.
	 */
	if (header == NULL) {
		prog = orig_prog;
		goto out_imms;
	}

	/* 2) Actual pass to generate final JIT code */
	ctx.target = (u32 *) image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	/* If building the body of the JITed code fails somehow,
	 * we fall back to the interpreter.
	 */
	if (build_body(&ctx) < 0) {
		image_ptr = NULL;
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_imms;
	}
	build_epilogue(&ctx);
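	/* Editor's note (illustration): the second pass reuses the
	 * ctx.offsets[] computed by the fake pass, so both passes must
	 * emit the same number of instructions for the recorded branch
	 * targets to stay valid. The validation pass below additionally
	 * rejects any image still containing the UDF filler words that
	 * bpf_jit_binary_alloc() seeded the area with.
	 */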
	/* 3) Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		image_ptr = NULL;
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_imms;
	}
	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;

out_imms:
#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif
out_off:
	kfree(ctx.offsets);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}