/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */

#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))

#define FLAG_NEED_X_RESET	(1 << 0)

struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
	u8 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
{
	u16 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
{
	u32 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}

/*
 * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}
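/*
 * Worked example (illustrative only): the condition code lands in bits
 * [31:28] of the encoding, so _emit(ARM_COND_AL, ...) with
 * ARM_MOV_R(ARM_R0, ARM_R0) produces 0xe1a00000, the canonical
 * "mov r0, r0" NOP that emit_err_ret() below uses as padding.
 */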
/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif
	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}

static inline bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		return true;
	default:
		return false;
	}
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 first_inst = ctx->skf->insns[0].code;
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}

static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
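/*
 * Worked example (illustrative only): imm8m(0x00ff0000) succeeds with
 * rot = 8, since ror32(0xff, 16) == 0x00ff0000, and returns
 * rol32(0x00ff0000, 16) | (8 << 8) == 0x8ff, i.e. imm8 = 0xff rotated
 * right by 16 (this is the encoded immediate that emit_swap16() below
 * relies on). A value such as 0x12345678 fits no rotated byte, so
 * imm8m() returns -1 and callers fall back to emit_mov_i_no8m().
 */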
#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * we need to mask out the bits set in r_dst[23:16] due to
	 * the first shift instruction.
	 *
	 * note that 0x8ff is the encoded immediate 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}
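/*
 * Trace of the sequence above (illustrative): with r_src holding the
 * halfword 0x0000abcd, LSL #8 gives 0x00abcd00, ORR with (r_src LSR #8)
 * gives 0x00abcdab, and BIC with 0x00ff0000 clears bits [23:16],
 * leaving the byteswapped value 0x0000cdab.
 */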
#else  /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */

/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}

#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		emit(ARM_UDIV(rd, rm, rn), ctx);
		return;
	}
#endif
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}

static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}
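/*
 * Illustrative note: a filter whose first reference to X is a read
 * (e.g. BPF_MISC | BPF_TXA, or any BPF_ALU ... BPF_X opcode) reaches
 * update_on_xread() before SEEN_X is set, so FLAG_NEED_X_RESET makes
 * build_prologue() emit "mov r5, #0". This preserves the interpreter's
 * guarantee that X starts out as zero.
 */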
static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;
		code = bpf_anc_helper(inst);

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			load_order = 0;
load:
			/* the interpreter will deal with the negative K */
			if ((int)k < 0)
				return -ENOTSUPP;
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_HS;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			load_order = 0;
load_ind:
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LDX | BPF_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* r_off is r1 */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
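		/*
		 * Illustrative note: the 16 BPF_MEMWORDS scratch slots live
		 * on the stack at SCRATCH_OFF(k) = 4 * k above SP. If, say,
		 * only M[5] is referenced, SEEN_MEM_WORD(5) makes
		 * mem_words_used() return fls(0x20) == 6, so the prologue
		 * reserves 24 bytes even though slots 0-4 go unused.
		 */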
		case BPF_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			if (k == 1)
				break;
			emit_mov_i(r_scratch, k, ctx);
			emit_udiv(r_A, r_A, r_scratch, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udiv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= K; */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
							 ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
			break;
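		/*
		 * Illustrative note: ARM condition codes come in
		 * complementary even/odd pairs (EQ/NE = 0x0/0x1,
		 * HS/LO = 0x2/0x3, HI/LS = 0x8/0x9), so "condt ^ 1"
		 * above is the inverse test and selects the jf target.
		 */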
		case BPF_JMP | BPF_JEQ | BPF_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_JMP | BPF_JGT | BPF_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_JMP | BPF_JGE | BPF_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_CS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */

			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_RET | BPF_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_RET | BPF_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_ANC | SKF_AD_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
			else
				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		default:
			return -1;
		}
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
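/*
 * Illustrative note on the pass structure below: build_body() first runs
 * with ctx.target == NULL, which only counts instructions, records the
 * per-instruction offsets and accumulates ctx.seen; once the image size
 * is known and the binary header is allocated, the same functions run
 * again and actually write the instructions. Setting bpf_jit_enable to 2
 * (the net.core.bpf_jit_enable sysctl) additionally dumps the generated
 * image via bpf_jit_dump().
 */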
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;
	u8 *target_ptr;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf		= fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	alloc_size = 4 * ctx.idx;
	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
				      4, jit_fill_hole);
	if (header == NULL)
		goto out;

	ctx.target = (u32 *) target_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)ctx.target;
	fp->jited = true;
out:
	kfree(ctx.offsets);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}