
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF bytecode on MIPS.
 * Implementation of JIT functions for 32-bit CPUs.
 */
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>

/* MIPS a4-a7 are not available in the o32 ABI */

/* Stack is 8-byte aligned in o32 ABI */

/* This corresponds to stack space for register arguments a0-a3. */

/* Temporary 64-bit register used by JIT */

/* R0-to-v0 assignment (4 bytes) if big endian. */

/* Caller-saved CPU registers */

/* Callee-saved CPU registers */
/*
 * Mapping of 64-bit eBPF registers to 32-bit native MIPS registers.
 *    the MIPS convention for passing 64-bit arguments and return values.
 * 2) The eBPF return value, arguments and callee-saved registers are mapped
 *    only one general-purpose register is actually needed for the mapping.
 *    for constant blinding. The gp register is callee-saved.
 * 5) One 64-bit temporary register is mapped for use when sign-extending
 *    immediate operands. MIPS registers t6-t9 are available to the JIT
 *    for use as temporaries when implementing complex 64-bit operations.
 */
/* Return value from in-kernel function, and exit value from eBPF */

/* Arguments from eBPF program to in-kernel function */

/* Callee-saved registers that in-kernel function will preserve */

/* Read-only frame pointer to access the eBPF stack */

/* Get low CPU register for a 64-bit eBPF register mapping */

/* Get high CPU register for a 64-bit eBPF register mapping */
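/*
 * The helper bodies are elided in this excerpt. A minimal sketch of
 * what they plausibly look like, assuming register pairs are ordered
 * so that reg[1] always pairs with the lower memory address, which is
 * consistent with the unconditional word order used by the 64-bit
 * load/store sequences further down:
 */
static inline u8 lo(const u8 *reg)
{
#ifdef __BIG_ENDIAN
	return reg[0];	/* BE: reg[1] holds the high word */
#else
	return reg[1];	/* LE: reg[1] holds the low word */
#endif
}

static inline u8 hi(const u8 *reg)
{
#ifdef __BIG_ENDIAN
	return reg[1];
#else
	return reg[0];
#endif
}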
/*
 * Mark a 64-bit CPU register pair as clobbered; it needs to be
 * saved/restored by the program if callee-saved.
 */
/* dst = imm (sign-extended) */
		emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1);

	if (!ctx->program->aux->verifier_zext) {
/* ALU immediate operation (64-bit) */
	u8 src = MIPS_R_T6;

		imm = -imm;
		imm = -imm;
	emit_mov_i(ctx, src, imm);

		emit(ctx, addu, lo(dst), lo(dst), src);
		emit(ctx, sltu, MIPS_R_T9, lo(dst), src);
		emit(ctx, addiu, hi(dst), hi(dst), -1);
	/* dst = dst - imm */
		emit(ctx, sltu, MIPS_R_T9, lo(dst), src);
		emit(ctx, subu, lo(dst), lo(dst), src);

		emit(ctx, or, lo(dst), lo(dst), src);
		emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1);

		emit(ctx, and, lo(dst), lo(dst), src);

		emit(ctx, xor, lo(dst), lo(dst), src);
		emit(ctx, addiu, hi(dst), hi(dst), -1);
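/*
 * Not part of the JIT; an illustrative sketch of the add-with-carry
 * decomposition the addu/sltu pair above implements. MIPS has no
 * carry flag, so the carry out of the low word is recovered with an
 * unsigned compare: after lo += s, a wrap-around means lo < s.
 */
static void add64_sketch(u32 *dlo, u32 *dhi, u32 slo, u32 shi)
{
	u32 sum = *dlo + slo;
	u32 carry = sum < slo;		/* what sltu computes into t9 */

	*dlo = sum;
	*dhi = *dhi + shi + carry;
}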
/* ALU register operation (64-bit) */
			const u8 dst[], const u8 src[], u8 op)

	/* dst = dst + src */
		if (src == dst) {
			emit(ctx, addu, lo(dst), lo(dst), lo(src));
			emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src));
			emit(ctx, addu, hi(dst), hi(dst), hi(src));
	/* dst = dst - src */
		emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src));
		emit(ctx, subu, lo(dst), lo(dst), lo(src));
		emit(ctx, subu, hi(dst), hi(dst), hi(src));
	/* dst = dst | src */
		emit(ctx, or, lo(dst), lo(dst), lo(src));
		emit(ctx, or, hi(dst), hi(dst), hi(src));
	/* dst = dst & src */
		emit(ctx, and, lo(dst), lo(dst), lo(src));
		emit(ctx, and, hi(dst), hi(dst), hi(src));
	/* dst = dst ^ src */
		emit(ctx, xor, lo(dst), lo(dst), lo(src));
		emit(ctx, xor, hi(dst), hi(dst), hi(src));
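/*
 * Likewise illustrative: the subtract-with-borrow counterpart of the
 * sltu/subu/subu sequence above. The borrow must be computed from the
 * operands before the low-word subtraction overwrites lo(dst), which
 * is why the JIT emits the sltu first in the BPF_SUB case.
 */
static void sub64_sketch(u32 *dlo, u32 *dhi, u32 slo, u32 shi)
{
	u32 borrow = *dlo < slo;	/* sltu before the subu */

	*dlo = *dlo - slo;
	*dhi = *dhi - shi - borrow;
}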
/* ALU invert (64-bit) */

/* ALU shift immediate (64-bit) */
		emit(ctx, srl, MIPS_R_T9, lo(dst), 32 - imm);
		emit(ctx, sll, hi(dst), lo(dst), imm - 32);

		emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm);
		emit(ctx, srl, lo(dst), hi(dst), imm - 32);

		emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm);
		emit(ctx, sra, lo(dst), hi(dst), imm - 32);
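/*
 * Illustrative only: the two shapes of a 64-bit left shift by a
 * constant, matching the sll/srl pairs above. For 0 < n < 32 the bits
 * crossing the word boundary are saved first; for n >= 32 the low
 * word moves entirely into the high word. The name shl64_sketch and
 * the 1..63 precondition are assumptions for this sketch.
 */
static void shl64_sketch(u32 *lo, u32 *hi, unsigned int n)	/* n in 1..63 */
{
	if (n < 32) {
		u32 cross = *lo >> (32 - n);	/* bits entering the high word */

		*hi = (*hi << n) | cross;
		*lo <<= n;
	} else {
		*hi = *lo << (n - 32);
		*lo = 0;
	}
}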
/* ALU shift register (64-bit) */
			const u8 dst[], u8 src, u8 op)

	emit(ctx, andi, t1, src, 32);			/* t1 = src & 32 */
	emit(ctx, nor, t2, src, MIPS_R_ZERO);		/* t2 = ~src (delay slot) */

	/* dst = dst << src */
		emit(ctx, sllv, hi(dst), lo(dst), src);	/* dh = dl << src */
		emit(ctx, sllv, lo(dst), lo(dst), src);	/* dl = dl << src */
		emit(ctx, sllv, hi(dst), hi(dst), src);	/* dh = dh << src */

	/* dst = dst >> src */
		emit(ctx, srlv, lo(dst), hi(dst), src);	/* dl = dh >> src */
		emit(ctx, srlv, lo(dst), lo(dst), src);	/* dl = dl >> src */
		emit(ctx, srlv, hi(dst), hi(dst), src);	/* dh = dh >> src */

	/* dst = dst >> src (arithmetic) */
		emit(ctx, srav, lo(dst), hi(dst), src);	/* dl = dh >>a src */
		emit(ctx, srlv, lo(dst), lo(dst), src);	/* dl = dl >> src */
		emit(ctx, srav, hi(dst), hi(dst), src);	/* dh = dh >>a src */
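/*
 * Illustrative only: a 64-bit arithmetic right shift by a variable
 * amount, the operation selected by the srav/srlv sequences above.
 * Sign bits of the high word fill the vacated positions; for n >= 32
 * the high word collapses to all sign bits, matching the sra-by-31
 * pattern the JIT uses.
 */
static void sra64_sketch(u32 *lo, u32 *hi, unsigned int n)	/* n in 1..63 */
{
	if (n < 32) {
		*lo = (*lo >> n) | (*hi << (32 - n));
		*hi = (u32)((s32)*hi >> n);
	} else {
		*lo = (u32)((s32)*hi >> (n - 32));
		*hi = (u32)((s32)*hi >> 31);	/* all sign bits */
	}
}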
/* ALU mul immediate (64x32-bit) */
	u8 src = MIPS_R_T6;

	/* dst = dst * 1 is a no-op */
	/* dst = dst * -1 */
	case -1:

		/* hi(dst) = hi(dst) * src(imm) */
		emit_mov_i(ctx, src, imm);
			emit(ctx, mul, hi(dst), hi(dst), src);
			emit(ctx, multu, hi(dst), src);

		/* hi(dst) = hi(dst) - lo(dst) */

		/* tmp = lo(dst) * src(imm) >> 32 */
		/* lo(dst) = lo(dst) * src(imm) */
			emit(ctx, muhu, tmp, lo(dst), src);
			emit(ctx, mulu, lo(dst), lo(dst), src);
			emit(ctx, multu, lo(dst), src);
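/*
 * Illustrative only: a 64-bit by signed 32-bit immediate multiply
 * built from unsigned 32-bit products, as emitted above. With
 * s = imm mod 2^32 and imm < 0, imm = s - 2^32, so
 * dst * imm = dst * s - (lo(dst) << 32); that subtraction is the
 * "hi(dst) = hi(dst) - lo(dst)" correction noted above.
 */
static void mul64_imm_sketch(u32 *dlo, u32 *dhi, s32 imm)
{
	u32 s = (u32)imm;
	u64 prod = (u64)*dlo * s;		/* mulu/muhu or multu */
	u32 hi = *dhi * s + (u32)(prod >> 32);

	if (imm < 0)
		hi -= *dlo;			/* account for imm = s - 2^32 */
	*dhi = hi;
	*dlo = (u32)prod;
}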
/* ALU mul register (64x64-bit) */
			const u8 dst[], const u8 src[])

	/* acc = hi(dst) * lo(src) */
		emit(ctx, mul, acc, hi(dst), lo(src));
		emit(ctx, multu, hi(dst), lo(src));

	/* tmp = lo(dst) * hi(src) */
		emit(ctx, mul, tmp, lo(dst), hi(src));
		emit(ctx, multu, lo(dst), hi(src));

	/* tmp = lo(dst) * lo(src) >> 32 */
	/* lo(dst) = lo(dst) * lo(src) */
		emit(ctx, muhu, tmp, lo(dst), lo(src));
		emit(ctx, mulu, lo(dst), lo(dst), lo(src));
		emit(ctx, multu, lo(dst), lo(src));
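/*
 * Illustrative only: the low 64 bits of a 64x64 product from 32-bit
 * multiplies, mirroring the sequence above. The two cross products
 * affect only the high word; muhu (or mfhi after multu) supplies the
 * carry of the low-word product into the high word.
 */
static void mul64_sketch(u32 *dlo, u32 *dhi, u32 slo, u32 shi)
{
	u32 acc = *dhi * slo + *dlo * shi;	/* low halves of cross products */
	u64 prod = (u64)*dlo * slo;		/* full low-word product */

	*dhi = acc + (u32)(prod >> 32);
	*dlo = (u32)prod;
}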
/* Helper function for 64-bit modulo */
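/*
 * The helper body is elided in this excerpt. A plausible minimal
 * implementation, assuming it wraps div64_u64_rem() from
 * <linux/math64.h> so the JIT only has to call a single C symbol:
 */
static u64 jit_mod64(u64 a, u64 b)
{
	u64 rem;

	div64_u64_rem(a, b, &rem);
	return rem;
}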
/* ALU div/mod register (64-bit) */
			const u8 dst[], const u8 src[], u8 op)
	const u8 *r0 = bpf2mips32[BPF_REG_0];	/* Mapped to v0-v1 */
	const u8 *r1 = bpf2mips32[BPF_REG_1];	/* Mapped to a0-a1 */
	const u8 *r2 = bpf2mips32[BPF_REG_2];	/* Mapped to a2-a3 */

	/* Push caller-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,

	/* Put 64-bit arguments 1 and 2 in registers a0-a3 */
		emit(ctx, move, MIPS_R_T9, src[k]);

	/* dst = dst / src */
	/* dst = dst % src */

	/* Store the 64-bit result in dst */

	/* Restore caller-saved registers, excluding the computed result */
	pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,
static void emit_swap8_r(struct jit_context *ctx, u8 dst, u8 src, u8 mask)
	emit(ctx, and, tmp, src, mask);		/* tmp = src & 0x00ff00ff */
	emit(ctx, srl, dst, src, 8);		/* dst = src >> 8 */

static void emit_swap16_r(struct jit_context *ctx, u8 dst, u8 src)
	emit(ctx, sll, tmp, src, 16);		/* tmp = src << 16 */
	emit(ctx, srl, dst, src, 16);		/* dst = src >> 16 */
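/*
 * Illustrative only: a full 32-bit byte swap composed from the two
 * helpers above; adjacent bytes are exchanged with a masked 8-bit
 * shift pair, then the half words are exchanged.
 */
static u32 bswap32_sketch(u32 x)
{
	u32 t = (x & 0x00ff00ff) << 8;		/* emit_swap8_r steps */

	x = ((x >> 8) & 0x00ff00ff) | t;
	return (x << 16) | (x >> 16);		/* emit_swap16_r steps */
}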
	/* Zero-extend a word */

	/* Zero-extend a half word */

/* Load operation: dst = *(size *)(src + off) */
		const u8 dst[], u8 src, s16 off, u8 size)
		emit(ctx, lbu, lo(dst), off, src);
		emit(ctx, lhu, lo(dst), off, src);
		emit(ctx, lw, lo(dst), off, src);
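		/*
		 * 64-bit load: order the two loads so that the half of
		 * the destination pair that doubles as the base register
		 * (src) is written last, keeping the address intact for
		 * the second lw.
		 */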
		if (dst[1] == src) {
			emit(ctx, lw, dst[0], off + 4, src);
			emit(ctx, lw, dst[1], off, src);
		} else {
			emit(ctx, lw, dst[1], off, src);
			emit(ctx, lw, dst[0], off + 4, src);
		}

/* Store operation: *(size *)(dst + off) = src */
		const u8 dst, const u8 src[], s16 off, u8 size)
		emit(ctx, sb, lo(src), off, dst);
		emit(ctx, sh, lo(src), off, dst);
		emit(ctx, sw, lo(src), off, dst);
		emit(ctx, sw, src[1], off, dst);
		emit(ctx, sw, src[0], off + 4, dst);
/* Atomic read-modify-write (32-bit, non-ll/sc fallback) */
			u8 dst, u8 src, s16 off, u8 code)

	/* Push caller-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,

	/*
	 * Argument 1: dst+off if xchg, otherwise src, passed in register a0
	 * Argument 2: src if xchg, otherwise dst+off, passed in register a1
	 */
		emit(ctx, move, MIPS_R_A1, src);
		emit(ctx, move, MIPS_R_A0, src);

	/* Update src register with old value, if specified */
		emit(ctx, move, src, MIPS_R_V0);
		exclude = BIT(src);
		clobber_reg(ctx, src);

	/* Restore caller-saved registers, except any fetched value */
	pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,

/* Helper function for 64-bit atomic exchange */

/* Atomic read-modify-write (64-bit) */
			u8 dst, const u8 src[], s16 off, u8 code)
	const u8 *r0 = bpf2mips32[BPF_REG_0];	/* Mapped to v0-v1 */
	const u8 *r1 = bpf2mips32[BPF_REG_1];	/* Mapped to a0-a1 */

	/* Push caller-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,

	/*
	 * Argument 1: 64-bit src, passed in registers a0-a1
	 * Argument 2: 32-bit dst+off, passed in register a2
	 */
	emit(ctx, move, r1[0], src[0]);
	emit(ctx, move, r1[1], src[1]);

	/* Update src register with old value, if specified */
		emit(ctx, move, lo(src), lo(r0));
		emit(ctx, move, hi(src), hi(r0));
		exclude = BIT(src[0]) | BIT(src[1]);
		clobber_reg64(ctx, src);

	/* Restore caller-saved registers, except any fetched value */
	pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,
/* Atomic compare-and-exchange (32-bit, non-ll/sc fallback) */
static void emit_cmpxchg_r32(struct jit_context *ctx, u8 dst, u8 src, s16 off)

	/* Push caller-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,

	/*
	 * Argument 1: 32-bit dst+off, passed in register a0
	 * Argument 2: 32-bit r0, passed in register a1
	 * Argument 3: 32-bit src, passed in register a2
	 */
	emit(ctx, move, MIPS_R_T8, src);

	/* Restore caller-saved registers, except the return value */
	pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,

/* Atomic compare-and-exchange (64-bit) */
			u8 dst, const u8 src[], s16 off)

	/* Push caller-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,

	/*
	 * Argument 1: 32-bit dst+off, passed in register a0 (a1 unused)
	 * Argument 2: 64-bit r0, passed in registers a2-a3
	 * Argument 3: 64-bit src, passed on stack
	 */
	push_regs(ctx, BIT(src[0]) | BIT(src[1]), 0, JIT_RESERVED_STACK);

	/* Restore caller-saved registers, except the return value */
	pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,
/* Emulation of 64-bit sltiu rd, rs, imm, where imm may be S32_MAX + 1 */
	emit(ctx, sltiu, tmp, hi(rs), -1);	/* tmp = rsh < ~0U */

/* Emulation of 64-bit sltu rd, rs, rt */
	emit(ctx, subu, tmp, hi(rs), hi(rt));	/* tmp = rsh - rth */

/* Emulation of 64-bit slti rd, rs, imm, where imm may be S32_MAX + 1 */
	 * if (imm < 0) rd = rsh < -1
	 */
	emit(ctx, slti, rd, hi(rs), imm < 0 ? -1 : 0); /* rd = rsh < hi(imm) */

/* Emulation of 64-bit slt rd, rs, rt */
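/*
 * Illustrative only: the relation these emulations compute branch-free
 * from 32-bit halves. rs < rt iff the high words compare accordingly,
 * or the high words are equal and the low words compare unsigned. The
 * signed variants differ only in how the high words are compared.
 */
static u32 sltu64_sketch(u32 alo, u32 ahi, u32 blo, u32 bhi)
{
	return (ahi < bhi) || (ahi == bhi && alo < blo);
}

static u32 slt64_sketch(u32 alo, s32 ahi, u32 blo, s32 bhi)
{
	return (ahi < bhi) || (ahi == bhi && alo < blo);
}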
/* Jump immediate (64-bit) */

	/* No-op, used internally for branch optimization */
		if (imm >= -0x7fff && imm <= 0x8000) {
			emit(ctx, addiu, tmp, lo(dst), -imm);
		if (imm < 0) /* Sign-extension pulls in high word */

/* Jump register (64-bit) */
			const u8 dst[], const u8 src[], s32 off, u8 op)

	/* No-op, used internally for branch optimization */

	/* PC += off if dst == src */
	/* PC += off if dst != src */
		emit(ctx, subu, t1, lo(dst), lo(src));
		emit(ctx, subu, t2, hi(dst), hi(src));

	/* PC += off if dst & src */
		emit(ctx, and, t1, lo(dst), lo(src));
		emit(ctx, and, t2, hi(dst), hi(src));

	/* PC += off if dst > src */
		emit_sltu_r64(ctx, t1, src, dst);
	/* PC += off if dst >= src */
		emit_sltu_r64(ctx, t1, dst, src);
	/* PC += off if dst < src */
		emit_sltu_r64(ctx, t1, dst, src);
	/* PC += off if dst <= src */
		emit_sltu_r64(ctx, t1, src, dst);
	/* PC += off if dst > src (signed) */
		emit_slt_r64(ctx, t1, src, dst);
	/* PC += off if dst >= src (signed) */
		emit_slt_r64(ctx, t1, dst, src);
	/* PC += off if dst < src (signed) */
		emit_slt_r64(ctx, t1, dst, src);
	/* PC += off if dst <= src (signed) */
		emit_slt_r64(ctx, t1, src, dst);
	if (bpf_jit_get_func_addr(ctx->program, insn, false,
		return -1;
		return -1;

	/*
	 * eBPF R1 - function argument (context ptr), passed in a0-a1
	 * eBPF R2 - ptr to object with array of function entry points
	 * eBPF R3 - array index of function to be called
	 * stack[sz] - remaining tail call count, initialized in prologue
	 */

	/* if (ind >= ary->map.max_entries) goto out */
		return -1;
	emit(ctx, lw, t1, off, ary);		/* t1 = ary->map.max_entries */

	/* if (TCC-- <= 0) goto out */
	emit(ctx, lw, t2, ctx->stack_size, MIPS_R_SP);	/* t2 = *(SP + size) */
	emit(ctx, addiu, t2, t2, -1);			/* t2-- (delay slot) */
	emit(ctx, sw, t2, ctx->stack_size, MIPS_R_SP);	/* *(SP + size) = t2 */

	/* prog = ary->ptrs[ind] */
		return -1;

	/* func = prog->bpf_func + 8 (prologue skip offset) */
		return -1;
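/*
 * Taken together, and mirroring the inline comments above, the emitted
 * tail call sequence implements this C-like pseudocode:
 *
 *	if (ind >= ary->map.max_entries)
 *		goto out;
 *	if (TCC-- <= 0)
 *		goto out;
 *	prog = ary->ptrs[ind];
 *	if (prog == NULL)
 *		goto out;
 *	goto *(prog->bpf_func + 8);	// skip the callee's prologue
 */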
/*
 * Stack frame layout for a JITed program (stack grows down).
 *
 * Higher address  : Caller's stack frame       :
 *                  :----------------------------:
 *                  : 64-bit eBPF args r3-r5     :
 *                  :----------------------------:
 *                  : Reserved / tail call count :
 *                  +============================+ <--- MIPS sp before call
 *                  | Callee-saved registers,    |
 *                  | including RA and FP        |
 *                  +----------------------------+ <--- eBPF FP (MIPS zero,fp)
 *                  | Local eBPF variables       |
 *                  | allocated by program       |
 *                  +----------------------------+
 *                  | Reserved for caller-saved  |
 *                  | registers                  |
 *                  +----------------------------+
 *                  | Reserved for 64-bit eBPF   |
 *                  | args r3-r5 & args passed   |
 *                  | on stack in kernel calls   |
 * Lower address    +============================+ <--- MIPS sp
 */
	 * 16-byte area in the parent's stack frame. On a tail call, the

	/*
	 * Register eBPF R1 contains the 32-bit context pointer argument.
	 * A 32-bit argument is always passed in MIPS register a0, regardless
	 * of CPU endianness. Initialize R1 accordingly and zero-extend.
	 */

	/* === Entry-point for tail calls === */

	/* Zero-extend the 32-bit argument */

	if (ctx->accessed & BIT(BPF_REG_FP))

	/* Compute the stack space needed for callee-saved registers */
	saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u32);

	locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT);

	 * caller-saved registers and function arguments passed on the stack.
	reserved = ctx->stack_used;

	emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, -stack);

	/* Store callee-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved);

	if (ctx->accessed & BIT(BPF_REG_FP))
		emit(ctx, addiu, lo(fp), MIPS_R_SP, stack - saved);

	ctx->saved_size = saved;
	ctx->stack_size = stack;

	/* Restore callee-saved registers from stack */
	pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0,
		 ctx->stack_size - ctx->saved_size);

	/*
	 * A 32-bit return value is always passed in MIPS register v0,
	 * but on big-endian targets the low part of R0 is mapped to v1.
	 */

	emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size);
	const u8 *dst = bpf2mips32[insn->dst_reg];
	const u8 *src = bpf2mips32[insn->src_reg];
	u8 code = insn->code;
	s16 off = insn->off;
	s32 imm = insn->imm;

	/* dst = src */
		emit_mov_r(ctx, lo(dst), lo(src));

	/* dst = -dst */

	/* dst = dst - imm */

	/* dst = dst & src */
	/* dst = dst | src */
	/* dst = dst ^ src */
	/* dst = dst << src */
	/* dst = dst >> src */
	/* dst = dst >> src (arithmetic) */
	/* dst = dst + src */
	/* dst = dst - src */
	/* dst = dst * src */
	/* dst = dst / src */
	/* dst = dst % src */
		emit_alu_r(ctx, lo(dst), lo(src), BPF_OP(code));

	/* dst = imm (64-bit) */

	/* dst = src (64-bit) */
		emit_mov_r(ctx, lo(dst), lo(src));
		emit_mov_r(ctx, hi(dst), hi(src));

	/* dst = -dst (64-bit) */

	/* dst = dst & imm (64-bit) */
	/* dst = dst | imm (64-bit) */
	/* dst = dst ^ imm (64-bit) */
	/* dst = dst + imm (64-bit) */
	/* dst = dst - imm (64-bit) */

	/* dst = dst << imm (64-bit) */
	/* dst = dst >> imm (64-bit) */
	/* dst = dst >> imm (64-bit, arithmetic) */

	/* dst = dst * imm (64-bit) */

	/* dst = dst / imm (64-bit) */
	/* dst = dst % imm (64-bit) */
		/*
		 * Sign-extend the immediate value into a temporary register,

	/* dst = dst & src (64-bit) */
	/* dst = dst | src (64-bit) */
	/* dst = dst ^ src (64-bit) */
	/* dst = dst + src (64-bit) */
	/* dst = dst - src (64-bit) */
		emit_alu_r64(ctx, dst, src, BPF_OP(code));

	/* dst = dst << src (64-bit) */
	/* dst = dst >> src (64-bit) */
	/* dst = dst >> src (64-bit, arithmetic) */
		emit_shift_r64(ctx, dst, lo(src), BPF_OP(code));

	/* dst = dst * src (64-bit) */
		emit_mul_r64(ctx, dst, src);

	/* dst = dst / src (64-bit) */
	/* dst = dst % src (64-bit) */
		emit_divmod_r64(ctx, dst, src, BPF_OP(code));

	/* LDX: dst = *(size *)(src + off) */
		emit_ldx(ctx, dst, lo(src), off, BPF_SIZE(code));

		/* Sign-extend immediate value into temporary reg */

	/* STX: *(size *)(dst + off) = src */
		emit_stx(ctx, lo(dst), src, off, BPF_SIZE(code));

			emit_atomic_r(ctx, lo(dst), lo(src), off, imm);
		else /* Non-ll/sc fallback */
			emit_atomic_r32(ctx, lo(dst), lo(src),
			emit_zext_ver(ctx, src);

			emit_cmpxchg_r(ctx, lo(dst), lo(src),
		else /* Non-ll/sc fallback */
			emit_cmpxchg_r32(ctx, lo(dst), lo(src), off);
		/* Result zero-extension inserted by verifier */

	/* Atomics (64-bit) */
			emit_atomic_r64(ctx, lo(dst), src, off, imm);
			emit_cmpxchg_r64(ctx, lo(dst), src, off);

	/* PC += off if dst == src */
	/* PC += off if dst != src */
	/* PC += off if dst & src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
		emit_jmp_r(ctx, lo(dst), lo(src), rel, jmp);

	/* PC += off if dst == src */
	/* PC += off if dst != src */
	/* PC += off if dst & src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
		emit_jmp_r64(ctx, dst, src, rel, jmp);

	if (ctx->bpf_index == ctx->program->len - 1)

	return -EINVAL;

	return -EFAULT;

		ctx->bpf_index, code);
	return -E2BIG;