// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF bytecode on MIPS.
 * Implementation of JIT functions for 64-bit CPUs.
 *
 * Copyright (c) 2021 Anyfi Networks AB.
 * Author: Johan Almbladh <johan.almbladh@gmail.com>
 *
 * Based on code and ideas from
 * Copyright (c) 2017 Cavium, Inc.
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/uasm.h>

#include "bpf_jit_comp.h"

/* MIPS t0-t3 are not available in the n64 ABI */
#undef MIPS_R_T0
#undef MIPS_R_T1
#undef MIPS_R_T2
#undef MIPS_R_T3

/* Stack is 16-byte aligned in n64 ABI */
#define MIPS_STACK_ALIGNMENT 16

/* Extra 64-bit eBPF registers used by JIT */
#define JIT_REG_TC (MAX_BPF_JIT_REG + 0)
#define JIT_REG_ZX (MAX_BPF_JIT_REG + 1)

/* Number of prologue bytes to skip when doing a tail call */
#define JIT_TCALL_SKIP 4

/* Callee-saved CPU registers that the JIT must preserve */
#define JIT_CALLEE_REGS		\
	(BIT(MIPS_R_S0) |	\
	 BIT(MIPS_R_S1) |	\
	 BIT(MIPS_R_S2) |	\
	 BIT(MIPS_R_S3) |	\
	 BIT(MIPS_R_S4) |	\
	 BIT(MIPS_R_S5) |	\
	 BIT(MIPS_R_S6) |	\
	 BIT(MIPS_R_S7) |	\
	 BIT(MIPS_R_GP) |	\
	 BIT(MIPS_R_FP) |	\
	 BIT(MIPS_R_RA))

/* Caller-saved CPU registers available for JIT use */
#define JIT_CALLER_REGS		\
	(BIT(MIPS_R_A5) |	\
	 BIT(MIPS_R_A6) |	\
	 BIT(MIPS_R_A7))

/*
 * Mapping of 64-bit eBPF registers to 64-bit native MIPS registers.
 * MIPS registers t4 - t7 may be used by the JIT as temporary registers.
 * MIPS registers t8 - t9 are reserved for single-register common functions.
 */
static const u8 bpf2mips64[] = {
	/* Return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = MIPS_R_V0,
	/* Arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = MIPS_R_A0,
	[BPF_REG_2] = MIPS_R_A1,
	[BPF_REG_3] = MIPS_R_A2,
	[BPF_REG_4] = MIPS_R_A3,
	[BPF_REG_5] = MIPS_R_A4,
	/* Callee-saved registers that in-kernel function will preserve */
	[BPF_REG_6] = MIPS_R_S0,
	[BPF_REG_7] = MIPS_R_S1,
	[BPF_REG_8] = MIPS_R_S2,
	[BPF_REG_9] = MIPS_R_S3,
	/* Read-only frame pointer to access the eBPF stack */
	[BPF_REG_FP] = MIPS_R_FP,
	/* Temporary register for blinding constants */
	[BPF_REG_AX] = MIPS_R_AT,
	/* Tail call count register, caller-saved */
	[JIT_REG_TC] = MIPS_R_A5,
	/* Constant for register zero-extension */
	[JIT_REG_ZX] = MIPS_R_V1,
};

/*
 * MIPS 32-bit operations on 64-bit registers generate a sign-extended
 * result. However, the eBPF ISA mandates zero-extension, so we rely on the
 * verifier to add that for us (emit_zext_ver). In addition, ALU arithmetic
 * operations, right shift and byte swap require properly sign-extended
 * operands or the result is unpredictable. We emit explicit sign-extensions
 * in those cases.
 */
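
/*
 * Illustration (not part of the generated code): on MIPS64 the 32-bit
 * instructions operate on sign-extended words. For example, addu of
 * registers holding 0x000000007fffffff and 1 leaves 0xffffffff80000000
 * in the destination, whereas eBPF ALU32 semantics require
 * 0x0000000080000000. This is why 32-bit results may need
 * emit_zext()/emit_zext_ver() below.
 */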

/* Sign extension */
static void emit_sext(struct jit_context *ctx, u8 dst, u8 src)
{
	emit(ctx, sll, dst, src, 0);
	clobber_reg(ctx, dst);
}

/* Zero extension */
static void emit_zext(struct jit_context *ctx, u8 dst)
{
	if (cpu_has_mips64r2 || cpu_has_mips64r6) {
		emit(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	} else {
		emit(ctx, and, dst, dst, bpf2mips64[JIT_REG_ZX]);
		access_reg(ctx, JIT_REG_ZX); /* We need the ZX register */
	}
	clobber_reg(ctx, dst);
}

/* Zero extension, if verifier does not do it for us */
static void emit_zext_ver(struct jit_context *ctx, u8 dst)
{
	if (!ctx->program->aux->verifier_zext)
		emit_zext(ctx, dst);
}

/* dst = imm (64-bit) */
static void emit_mov_i64(struct jit_context *ctx, u8 dst, u64 imm64)
{
	if (imm64 >= 0xffffffffffff8000ULL || imm64 < 0x8000ULL) {
		emit(ctx, daddiu, dst, MIPS_R_ZERO, (s16)imm64);
	} else if (imm64 >= 0xffffffff80000000ULL ||
		   (imm64 < 0x80000000 && imm64 > 0xffff)) {
		emit(ctx, lui, dst, (s16)(imm64 >> 16));
		emit(ctx, ori, dst, dst, (u16)imm64 & 0xffff);
	} else {
		u8 acc = MIPS_R_ZERO;
		int shift = 0;
		int k;

		for (k = 0; k < 4; k++) {
			u16 half = imm64 >> (48 - 16 * k);

			if (acc == dst)
				shift += 16;

			if (half) {
				if (shift)
					emit(ctx, dsll_safe, dst, dst, shift);
				emit(ctx, ori, dst, acc, half);
				acc = dst;
				shift = 0;
			}
		}
		if (shift)
			emit(ctx, dsll_safe, dst, dst, shift);
	}
	clobber_reg(ctx, dst);
}
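
/*
 * Example (illustrative only): loading the 64-bit immediate
 * 0x123456789abcdef0 takes the fallback path above and emits roughly:
 *
 *   ori   dst, zero, 0x1234
 *   dsll  dst, dst, 16
 *   ori   dst, dst, 0x5678
 *   dsll  dst, dst, 16
 *   ori   dst, dst, 0x9abc
 *   dsll  dst, dst, 16
 *   ori   dst, dst, 0xdef0
 *
 * Zero half-words are skipped, and any trailing zeros are folded into a
 * single final shift via dsll_safe.
 */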

/* ALU immediate operation (64-bit) */
static void emit_alu_i64(struct jit_context *ctx, u8 dst, s32 imm, u8 op)
{
	switch (BPF_OP(op)) {
	/* dst = dst | imm */
	case BPF_OR:
		emit(ctx, ori, dst, dst, (u16)imm);
		break;
	/* dst = dst ^ imm */
	case BPF_XOR:
		emit(ctx, xori, dst, dst, (u16)imm);
		break;
	/* dst = -dst */
	case BPF_NEG:
		emit(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	/* dst = dst << imm */
	case BPF_LSH:
		emit(ctx, dsll_safe, dst, dst, imm);
		break;
	/* dst = dst >> imm */
	case BPF_RSH:
		emit(ctx, dsrl_safe, dst, dst, imm);
		break;
	/* dst = dst >> imm (arithmetic) */
	case BPF_ARSH:
		emit(ctx, dsra_safe, dst, dst, imm);
		break;
	/* dst = dst + imm */
	case BPF_ADD:
		emit(ctx, daddiu, dst, dst, imm);
		break;
	/* dst = dst - imm */
	case BPF_SUB:
		emit(ctx, daddiu, dst, dst, -imm);
		break;
	default:
		/* Width-generic operations */
		emit_alu_i(ctx, dst, imm, op);
	}
	clobber_reg(ctx, dst);
}

/* ALU register operation (64-bit) */
static void emit_alu_r64(struct jit_context *ctx, u8 dst, u8 src, u8 op)
{
	switch (BPF_OP(op)) {
	/* dst = dst << src */
	case BPF_LSH:
		emit(ctx, dsllv, dst, dst, src);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ctx, dsrlv, dst, dst, src);
		break;
	/* dst = dst >> src (arithmetic) */
	case BPF_ARSH:
		emit(ctx, dsrav, dst, dst, src);
		break;
	/* dst = dst + src */
	case BPF_ADD:
		emit(ctx, daddu, dst, dst, src);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit(ctx, dsubu, dst, dst, src);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		if (cpu_has_mips64r6) {
			emit(ctx, dmulu, dst, dst, src);
		} else {
			emit(ctx, dmultu, dst, src);
			emit(ctx, mflo, dst);
		}
		break;
	/* dst = dst / src */
	case BPF_DIV:
		if (cpu_has_mips64r6) {
			emit(ctx, ddivu_r6, dst, dst, src);
		} else {
			emit(ctx, ddivu, dst, src);
			emit(ctx, mflo, dst);
		}
		break;
	/* dst = dst % src */
	case BPF_MOD:
		if (cpu_has_mips64r6) {
			emit(ctx, dmodu, dst, dst, src);
		} else {
			emit(ctx, ddivu, dst, src);
			emit(ctx, mfhi, dst);
		}
		break;
	default:
		/* Width-generic operations */
		emit_alu_r(ctx, dst, src, op);
	}
	clobber_reg(ctx, dst);
}

/* Swap sub words in a register double word */
static void emit_swap_r64(struct jit_context *ctx, u8 dst, u8 mask, u32 bits)
{
	u8 tmp = MIPS_R_T9;

	emit(ctx, and, tmp, dst, mask);  /* tmp = dst & mask  */
	emit(ctx, dsll, tmp, tmp, bits); /* tmp = tmp << bits */
	emit(ctx, dsrl, dst, dst, bits); /* dst = dst >> bits */
	emit(ctx, and, dst, dst, mask);  /* dst = dst & mask  */
	emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp   */
}

/* Swap bytes and truncate a register double word, word or half word */
static void emit_bswap_r64(struct jit_context *ctx, u8 dst, u32 width)
{
	switch (width) {
	/* Swap bytes in a double word */
	case 64:
		if (cpu_has_mips64r2 || cpu_has_mips64r6) {
			emit(ctx, dsbh, dst, dst);
			emit(ctx, dshd, dst, dst);
		} else {
			u8 t1 = MIPS_R_T6;
			u8 t2 = MIPS_R_T7;

			emit(ctx, dsll32, t2, dst, 0);  /* t2 = dst << 32    */
			emit(ctx, dsrl32, dst, dst, 0); /* dst = dst >> 32   */
			emit(ctx, or, dst, dst, t2);    /* dst = dst | t2    */

			emit(ctx, ori, t2, MIPS_R_ZERO, 0xffff);
			emit(ctx, dsll32, t1, t2, 0);   /* t1 = t2 << 32     */
			emit(ctx, or, t1, t1, t2);      /* t1 = t1 | t2      */
			emit_swap_r64(ctx, dst, t1, 16);/* dst = swap16(dst) */

			emit(ctx, lui, t2, 0xff);       /* t2 = 0x00ff0000   */
			emit(ctx, ori, t2, t2, 0xff);   /* t2 = t2 | 0x00ff  */
			emit(ctx, dsll32, t1, t2, 0);   /* t1 = t2 << 32     */
			emit(ctx, or, t1, t1, t2);      /* t1 = t1 | t2      */
			emit_swap_r64(ctx, dst, t1, 8); /* dst = swap8(dst)  */
		}
		break;
	/* Swap bytes in a half word */
	/* Swap bytes in a word */
	case 32:
	case 16:
		emit_sext(ctx, dst, dst);
		emit_bswap_r(ctx, dst, width);
		if (cpu_has_mips64r2 || cpu_has_mips64r6)
			emit_zext(ctx, dst);
		break;
	}
	clobber_reg(ctx, dst);
}
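
/*
 * Illustrative note (not emitted code): in the pre-R2 fallback above, the
 * constructed masks are t1 = 0x0000ffff0000ffff for the 16-bit swap and
 * t1 = 0x00ff00ff00ff00ff for the 8-bit swap. Together with the initial
 * 32-bit rotate, the three steps reverse all eight bytes of the double word.
 */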

/* Truncate a register double word, word or half word */
static void emit_trunc_r64(struct jit_context *ctx, u8 dst, u32 width)
{
	switch (width) {
	case 64:
		break;
	/* Zero-extend a word */
	case 32:
		emit_zext(ctx, dst);
		break;
	/* Zero-extend a half word */
	case 16:
		emit(ctx, andi, dst, dst, 0xffff);
		break;
	}
	clobber_reg(ctx, dst);
}

/* Load operation: dst = *(size*)(src + off) */
static void emit_ldx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
{
	switch (size) {
	/* Load a byte */
	case BPF_B:
		emit(ctx, lbu, dst, off, src);
		break;
	/* Load a half word */
	case BPF_H:
		emit(ctx, lhu, dst, off, src);
		break;
	/* Load a word */
	case BPF_W:
		emit(ctx, lwu, dst, off, src);
		break;
	/* Load a double word */
	case BPF_DW:
		emit(ctx, ld, dst, off, src);
		break;
	}
	clobber_reg(ctx, dst);
}

/* Store operation: *(size *)(dst + off) = src */
static void emit_stx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
{
	switch (size) {
	/* Store a byte */
	case BPF_B:
		emit(ctx, sb, src, off, dst);
		break;
	/* Store a half word */
	case BPF_H:
		emit(ctx, sh, src, off, dst);
		break;
	/* Store a word */
	case BPF_W:
		emit(ctx, sw, src, off, dst);
		break;
	/* Store a double word */
	case BPF_DW:
		emit(ctx, sd, src, off, dst);
		break;
	}
}

/* Atomic read-modify-write */
static void emit_atomic_r64(struct jit_context *ctx,
			    u8 dst, u8 src, s16 off, u8 code)
{
	u8 t1 = MIPS_R_T6;
	u8 t2 = MIPS_R_T7;

	LLSC_sync(ctx);
	emit(ctx, lld, t1, off, dst);
	switch (code) {
	case BPF_ADD:
	case BPF_ADD | BPF_FETCH:
		emit(ctx, daddu, t2, t1, src);
		break;
	case BPF_AND:
	case BPF_AND | BPF_FETCH:
		emit(ctx, and, t2, t1, src);
		break;
	case BPF_OR:
	case BPF_OR | BPF_FETCH:
		emit(ctx, or, t2, t1, src);
		break;
	case BPF_XOR:
	case BPF_XOR | BPF_FETCH:
		emit(ctx, xor, t2, t1, src);
		break;
	case BPF_XCHG:
		emit(ctx, move, t2, src);
		break;
	}
	emit(ctx, scd, t2, off, dst);
	emit(ctx, LLSC_beqz, t2, -16 - LLSC_offset);
	emit(ctx, nop); /* Delay slot */

	if (code & BPF_FETCH) {
		emit(ctx, move, src, t1);
		clobber_reg(ctx, src);
	}
}
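
/*
 * Example (illustrative only): for a 64-bit atomic add without fetch, the
 * sequence emitted above is essentially:
 *
 *   lld   t1, off(dst)        # load-linked the old value
 *   daddu t2, t1, src         # compute the new value
 *   scd   t2, off(dst)        # store-conditional, t2 = 1 on success
 *   beqz  t2, <retry>         # retry from the lld if the store failed
 *   nop                       # delay slot
 *
 * LLSC_sync(), LLSC_beqz and LLSC_offset abstract over CPU-specific LL/SC
 * requirements, such as an extra leading sync or a different branch
 * encoding.
 */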

/* Atomic compare-and-exchange */
static void emit_cmpxchg_r64(struct jit_context *ctx, u8 dst, u8 src, s16 off)
{
	u8 r0 = bpf2mips64[BPF_REG_0];
	u8 t1 = MIPS_R_T6;
	u8 t2 = MIPS_R_T7;

	LLSC_sync(ctx);
	emit(ctx, lld, t1, off, dst);
	emit(ctx, bne, t1, r0, 12);
	emit(ctx, move, t2, src);	/* Delay slot */
	emit(ctx, scd, t2, off, dst);
	emit(ctx, LLSC_beqz, t2, -20 - LLSC_offset);
	emit(ctx, move, r0, t1);	/* Delay slot */

	clobber_reg(ctx, r0);
}

/* Function call */
static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn)
{
	u8 zx = bpf2mips64[JIT_REG_ZX];
	u8 tmp = MIPS_R_T6;
	bool fixed;
	u64 addr;

	/* Decode the call address */
	if (bpf_jit_get_func_addr(ctx->program, insn, false,
				  &addr, &fixed) < 0)
		return -1;
	if (!fixed)
		return -1;

	/* Push caller-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);

	/* Emit function call */
	emit_mov_i64(ctx, tmp, addr & JALR_MASK);
	emit(ctx, jalr, MIPS_R_RA, tmp);
	emit(ctx, nop); /* Delay slot */

	/* Restore caller-saved registers */
	pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);

	/* Re-initialize the JIT zero-extension register if accessed */
	if (ctx->accessed & BIT(JIT_REG_ZX)) {
		emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);
		emit(ctx, dsrl32, zx, zx, 0);
	}

	clobber_reg(ctx, MIPS_R_RA);
	clobber_reg(ctx, MIPS_R_V0);
	clobber_reg(ctx, MIPS_R_V1);
	return 0;
}

/* Function tail call */
static int emit_tail_call(struct jit_context *ctx)
{
	u8 ary = bpf2mips64[BPF_REG_2];
	u8 ind = bpf2mips64[BPF_REG_3];
	u8 tcc = bpf2mips64[JIT_REG_TC];
	u8 tmp = MIPS_R_T6;
	int off;

	/*
	 * Tail call:
	 * eBPF R1 - function argument (context ptr), passed in a0-a1
	 * eBPF R2 - ptr to object with array of function entry points
	 * eBPF R3 - array index of function to be called
	 */

	/* if (ind >= ary->map.max_entries) goto out */
	off = offsetof(struct bpf_array, map.max_entries);
	if (off > 0x7fff)
		return -1;
	emit(ctx, lwu, tmp, off, ary);            /* tmp = ary->map.max_entries */
	emit(ctx, sltu, tmp, ind, tmp);           /* tmp = ind < tmp            */
	emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0   */

	/* if (--TCC < 0) goto out */
	emit(ctx, daddiu, tcc, tcc, -1);          /* tcc-- (delay slot)         */
	emit(ctx, bltz, tcc, get_offset(ctx, 1)); /* PC += off(1) if tcc < 0    */
						  /* (next insn delay slot)     */
	/* prog = ary->ptrs[ind] */
	off = offsetof(struct bpf_array, ptrs);
	if (off > 0x7fff)
		return -1;
	emit(ctx, dsll, tmp, ind, 3);             /* tmp = ind << 3             */
	emit(ctx, daddu, tmp, tmp, ary);          /* tmp += ary                 */
	emit(ctx, ld, tmp, off, tmp);             /* tmp = *(tmp + off)         */

	/* if (prog == 0) goto out */
	emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0   */
	emit(ctx, nop);                           /* Delay slot                 */

	/* func = prog->bpf_func + 4 (prologue skip offset) */
	off = offsetof(struct bpf_prog, bpf_func);
	if (off > 0x7fff)
		return -1;
	emit(ctx, ld, tmp, off, tmp);                /* tmp = *(tmp + off) */
	emit(ctx, daddiu, tmp, tmp, JIT_TCALL_SKIP); /* tmp += skip (4)    */

	/* goto func */
	build_epilogue(ctx, tmp);
	access_reg(ctx, JIT_REG_TC);
	return 0;
}
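
/*
 * Note (added for clarity): JIT_TCALL_SKIP is 4 bytes, i.e. exactly the one
 * prologue instruction that re-initializes the tail call count register.
 * Jumping past it lets the callee inherit the caller's remaining tail call
 * budget instead of starting over at MAX_TAIL_CALL_CNT.
 */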

/*
 * Stack frame layout for a JITed program (stack grows down).
 *
 * Higher address  : Previous stack frame      :
 *                 +===========================+ <--- MIPS sp before call
 *                 | Callee-saved registers,   |
 *                 | including RA and FP       |
 *                 +---------------------------+ <--- eBPF FP (MIPS fp)
 *                 | Local eBPF variables      |
 *                 | allocated by program      |
 *                 +---------------------------+
 *                 | Reserved for caller-saved |
 *                 | registers                 |
 * Lower address   +===========================+ <--- MIPS sp
 */

/* Build program prologue to set up the stack and registers */
void build_prologue(struct jit_context *ctx)
{
	u8 fp = bpf2mips64[BPF_REG_FP];
	u8 tc = bpf2mips64[JIT_REG_TC];
	u8 zx = bpf2mips64[JIT_REG_ZX];
	int stack, saved, locals, reserved;

	/*
	 * In the unlikely event that the TCC limit is raised to more
	 * than 16 bits, it is clamped to the maximum value allowed for
	 * the generated code (0xffff). It is better to fail to compile
	 * than to degrade gracefully.
	 */
	BUILD_BUG_ON(MAX_TAIL_CALL_CNT > 0xffff);

	/*
	 * The first instruction initializes the tail call count register.
	 * On a tail call, the calling function jumps into the prologue
	 * after this instruction.
	 */
	emit(ctx, ori, tc, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);

	/* === Entry-point for tail calls === */

	/*
	 * If the eBPF frame pointer and tail call count registers were
	 * accessed they must be preserved. Mark them as clobbered here
	 * to save and restore them on the stack as needed.
	 */
	if (ctx->accessed & BIT(BPF_REG_FP))
		clobber_reg(ctx, fp);
	if (ctx->accessed & BIT(JIT_REG_TC))
		clobber_reg(ctx, tc);
	if (ctx->accessed & BIT(JIT_REG_ZX))
		clobber_reg(ctx, zx);

	/* Compute the stack space needed for callee-saved registers */
	saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u64);
	saved = ALIGN(saved, MIPS_STACK_ALIGNMENT);

	/* Stack space used by eBPF program local data */
	locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT);

	/*
	 * If we are emitting function calls, reserve extra stack space for
	 * caller-saved registers needed by the JIT. The required space is
	 * computed automatically during resource usage discovery (pass 1).
	 */
	reserved = ctx->stack_used;

	/* Allocate the stack frame */
	stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT);
	if (stack)
		emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack);

	/* Store callee-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved);

	/* Initialize the eBPF frame pointer if accessed */
	if (ctx->accessed & BIT(BPF_REG_FP))
		emit(ctx, daddiu, fp, MIPS_R_SP, stack - saved);

	/* Initialize the eBPF JIT zero-extension register if accessed */
	if (ctx->accessed & BIT(JIT_REG_ZX)) {
		emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);
		emit(ctx, dsrl32, zx, zx, 0);
	}

	ctx->saved_size = saved;
	ctx->stack_size = stack;
}

/* Build the program epilogue to restore the stack and registers */
void build_epilogue(struct jit_context *ctx, int dest_reg)
{
	/* Restore callee-saved registers from stack */
	pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0,
		 ctx->stack_size - ctx->saved_size);

	/* Release the stack frame */
	if (ctx->stack_size)
		emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size);

	/* Jump to return address and sign-extend the 32-bit return value */
	emit(ctx, jr, dest_reg);
	emit(ctx, sll, MIPS_R_V0, MIPS_R_V0, 0); /* Delay slot */
}
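
/*
 * Worked example (hypothetical numbers, added for illustration): a program
 * with stack_depth = 24 that clobbers s0, s1, fp and ra, and needs no
 * caller-saved spill area, gets
 *
 *   saved  = ALIGN(4 * 8, 16) = 32
 *   locals = ALIGN(24, 16)    = 32
 *   stack  = ALIGN(64, 16)    = 64
 *
 * so the prologue emits "daddiu sp, sp, -64" and points the eBPF frame
 * pointer at sp + 32, the boundary between the callee-saved area and the
 * program's locals shown in the layout diagram above.
 */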

/* Build one eBPF instruction */
int build_insn(const struct bpf_insn *insn, struct jit_context *ctx)
{
	u8 dst = bpf2mips64[insn->dst_reg];
	u8 src = bpf2mips64[insn->src_reg];
	u8 res = bpf2mips64[BPF_REG_0];
	u8 code = insn->code;
	s16 off = insn->off;
	s32 imm = insn->imm;
	s32 val, rel;
	u8 alu, jmp;

	switch (code) {
	/* ALU operations */
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
		emit_mov_i(ctx, dst, imm);
		emit_zext_ver(ctx, dst);
		break;
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
		if (imm == 1) {
			/* Special mov32 for zext */
			emit_zext(ctx, dst);
		} else {
			emit_mov_r(ctx, dst, src);
			emit_zext_ver(ctx, dst);
		}
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
		emit_sext(ctx, dst, dst);
		emit_alu_i(ctx, dst, 0, BPF_NEG);
		emit_zext_ver(ctx, dst);
		break;
	/* dst = dst & imm */
	/* dst = dst | imm */
	/* dst = dst ^ imm */
	/* dst = dst << imm */
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_LSH | BPF_K:
		if (!valid_alu_i(BPF_OP(code), imm)) {
			emit_mov_i(ctx, MIPS_R_T4, imm);
			emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
		} else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
			emit_alu_i(ctx, dst, val, alu);
		}
		emit_zext_ver(ctx, dst);
		break;
	/* dst = dst >> imm */
	/* dst = dst >> imm (arithmetic) */
	/* dst = dst + imm */
	/* dst = dst - imm */
	/* dst = dst * imm */
	/* dst = dst / imm */
	/* dst = dst % imm */
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		if (!valid_alu_i(BPF_OP(code), imm)) {
			emit_sext(ctx, dst, dst);
			emit_mov_i(ctx, MIPS_R_T4, imm);
			emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
		} else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
			emit_sext(ctx, dst, dst);
			emit_alu_i(ctx, dst, val, alu);
		}
		emit_zext_ver(ctx, dst);
		break;
	/* dst = dst & src */
	/* dst = dst | src */
	/* dst = dst ^ src */
	/* dst = dst << src */
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_alu_r(ctx, dst, src, BPF_OP(code));
		emit_zext_ver(ctx, dst);
		break;
	/* dst = dst >> src */
	/* dst = dst >> src (arithmetic) */
	/* dst = dst + src */
	/* dst = dst - src */
	/* dst = dst * src */
	/* dst = dst / src */
	/* dst = dst % src */
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
		emit_sext(ctx, dst, dst);
		emit_sext(ctx, MIPS_R_T4, src);
		emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
		emit_zext_ver(ctx, dst);
		break;
	/* dst = imm (64-bit) */
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_mov_i(ctx, dst, imm);
		break;
	/* dst = src (64-bit) */
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit_mov_r(ctx, dst, src);
		break;
	/* dst = -dst (64-bit) */
	case BPF_ALU64 | BPF_NEG:
		emit_alu_i64(ctx, dst, 0, BPF_NEG);
		break;
	/* dst = dst & imm (64-bit) */
	/* dst = dst | imm (64-bit) */
	/* dst = dst ^ imm (64-bit) */
	/* dst = dst << imm (64-bit) */
	/* dst = dst >> imm (64-bit) */
	/* dst = dst >> imm (64-bit, arithmetic) */
	/* dst = dst + imm (64-bit) */
	/* dst = dst - imm (64-bit) */
	/* dst = dst * imm (64-bit) */
	/* dst = dst / imm (64-bit) */
	/* dst = dst % imm (64-bit) */
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		if (!valid_alu_i(BPF_OP(code), imm)) {
			emit_mov_i(ctx, MIPS_R_T4, imm);
			emit_alu_r64(ctx, dst, MIPS_R_T4, BPF_OP(code));
		} else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
			emit_alu_i64(ctx, dst, val, alu);
		}
		break;
	/* dst = dst & src (64-bit) */
	/* dst = dst | src (64-bit) */
	/* dst = dst ^ src (64-bit) */
	/* dst = dst << src (64-bit) */
	/* dst = dst >> src (64-bit) */
	/* dst = dst >> src (64-bit, arithmetic) */
	/* dst = dst + src (64-bit) */
	/* dst = dst - src (64-bit) */
	/* dst = dst * src (64-bit) */
	/* dst = dst / src (64-bit) */
	/* dst = dst % src (64-bit) */
	case BPF_ALU64 | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit_alu_r64(ctx, dst, src, BPF_OP(code));
		break;
	/* dst = htole(dst) */
	/* dst = htobe(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		if (BPF_SRC(code) ==
#ifdef __BIG_ENDIAN
		    BPF_FROM_LE
#else
		    BPF_FROM_BE
#endif
		    )
			emit_bswap_r64(ctx, dst, imm);
		else
			emit_trunc_r64(ctx, dst, imm);
		break;
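	/*
	 * Clarifying example (not from the original source): on a
	 * little-endian kernel, BPF_FROM_BE with imm == 32 byte-swaps and
	 * truncates the register to 32 bits, while BPF_FROM_LE with
	 * imm == 32 is already in host order and only needs the
	 * zero-extending truncation done by emit_trunc_r64().
	 */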
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
		emit_mov_i64(ctx, dst, (u32)imm | ((u64)insn[1].imm << 32));
		return 1;
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		emit_ldx(ctx, dst, src, off, BPF_SIZE(code));
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		emit_mov_i(ctx, MIPS_R_T4, imm);
		emit_stx(ctx, dst, MIPS_R_T4, off, BPF_SIZE(code));
		break;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_stx(ctx, dst, src, off, BPF_SIZE(code));
		break;
	/* Speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;
	/* Atomics */
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		switch (imm) {
		case BPF_ADD:
		case BPF_ADD | BPF_FETCH:
		case BPF_AND:
		case BPF_AND | BPF_FETCH:
		case BPF_OR:
		case BPF_OR | BPF_FETCH:
		case BPF_XOR:
		case BPF_XOR | BPF_FETCH:
		case BPF_XCHG:
			if (BPF_SIZE(code) == BPF_DW) {
				emit_atomic_r64(ctx, dst, src, off, imm);
			} else if (imm & BPF_FETCH) {
				u8 tmp = dst;

				if (src == dst) { /* Don't overwrite dst */
					emit_mov_r(ctx, MIPS_R_T4, dst);
					tmp = MIPS_R_T4;
				}
				emit_sext(ctx, src, src);
				emit_atomic_r(ctx, tmp, src, off, imm);
				emit_zext_ver(ctx, src);
			} else { /* 32-bit, no fetch */
				emit_sext(ctx, MIPS_R_T4, src);
				emit_atomic_r(ctx, dst, MIPS_R_T4, off, imm);
			}
			break;
		case BPF_CMPXCHG:
			if (BPF_SIZE(code) == BPF_DW) {
				emit_cmpxchg_r64(ctx, dst, src, off);
			} else {
				u8 tmp = res;

				if (res == dst) /* Don't overwrite dst */
					tmp = MIPS_R_T4;
				emit_sext(ctx, tmp, res);
				emit_sext(ctx, MIPS_R_T5, src);
				emit_cmpxchg_r(ctx, dst, MIPS_R_T5, tmp, off);
				if (res == dst) /* Restore result */
					emit_mov_r(ctx, res, MIPS_R_T4);
				/* Result zext inserted by verifier */
			}
			break;
		default:
			goto notyet;
		}
		break;
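	/*
	 * Note (added for clarity): the 32-bit atomics sign-extend their
	 * operands first, since the word-sized LL/SC sequences emitted by
	 * emit_atomic_r()/emit_cmpxchg_r() expect sign-extended values.
	 * The MIPS_R_T4 shuffling above only protects the dst address
	 * register when it is also used as the fetch operand (src) or as
	 * the cmpxchg result register (r0).
	 */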
	/* PC += off if dst == src */
	/* PC += off if dst != src */
	/* PC += off if dst & src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		if (off == 0)
			break;
		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
		emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
		emit_sext(ctx, MIPS_R_T5, src); /* Sign-extended src */
		emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
		if (finish_jmp(ctx, jmp, off) < 0)
			goto toofar;
		break;
	/* PC += off if dst == imm */
	/* PC += off if dst != imm */
	/* PC += off if dst & imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (off == 0)
			break;
		setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
		emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
		if (valid_jmp_i(jmp, imm)) {
			emit_jmp_i(ctx, MIPS_R_T4, imm, rel, jmp);
		} else {
			/* Move large immediate to register, sign-extended */
			emit_mov_i(ctx, MIPS_R_T5, imm);
			emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
		}
		if (finish_jmp(ctx, jmp, off) < 0)
			goto toofar;
		break;
	/* PC += off if dst == src */
	/* PC += off if dst != src */
	/* PC += off if dst & src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
		if (off == 0)
			break;
		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
		emit_jmp_r(ctx, dst, src, rel, jmp);
		if (finish_jmp(ctx, jmp, off) < 0)
			goto toofar;
		break;
	/* PC += off if dst == imm */
	/* PC += off if dst != imm */
	/* PC += off if dst & imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
		if (off == 0)
			break;
		setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel);
		if (valid_jmp_i(jmp, imm)) {
			emit_jmp_i(ctx, dst, imm, rel, jmp);
		} else {
			/* Move large immediate to register */
			emit_mov_i(ctx, MIPS_R_T4, imm);
			emit_jmp_r(ctx, dst, MIPS_R_T4, rel, jmp);
		}
		if (finish_jmp(ctx, jmp, off) < 0)
			goto toofar;
		break;
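	/*
	 * Note (added for clarity): the 32-bit conditional jumps above
	 * first sign-extend both operands into T4/T5 so the word-sized
	 * comparison behaves correctly on 64-bit registers. If a branch
	 * target cannot be reached by the emitted sequence, finish_jmp()
	 * fails and the program is rejected with -E2BIG below.
	 */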
	/* PC += off */
	case BPF_JMP | BPF_JA:
		if (off == 0)
			break;
		if (emit_ja(ctx, off) < 0)
			goto toofar;
		break;
	/* Tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_tail_call(ctx) < 0)
			goto invalid;
		break;
	/* Function call */
	case BPF_JMP | BPF_CALL:
		if (emit_call(ctx, insn) < 0)
			goto invalid;
		break;
	/* Function return */
	case BPF_JMP | BPF_EXIT:
		/*
		 * Optimization: when last instruction is EXIT
		 * simply continue to epilogue.
		 */
		if (ctx->bpf_index == ctx->program->len - 1)
			break;
		if (emit_exit(ctx) < 0)
			goto toofar;
		break;

	default:
invalid:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;
toofar:
		pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n",
			     ctx->bpf_index, code);
		return -E2BIG;
	}
	return 0;
}