/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int flags;
	unsigned int epilogue_offset;
	u32 *offset;
	union loongarch_instruction *image;
	u32 stack_size;
};

struct jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

/*
 * While ctx->image is NULL the JIT is in its sizing pass: nothing is
 * written and only ctx->idx advances, counting the instructions that
 * the final pass will emit.
 */
#define emit_insn(ctx, func, ...)						\
do {										\
	if (ctx->image != NULL) {						\
		union loongarch_instruction *insn = &ctx->image[ctx->idx];	\
		emit_##func(insn, ##__VA_ARGS__);				\
	}									\
	ctx->idx++;								\
} while (0)

#define is_signed_imm12(val)	signed_imm_check(val, 12)
#define is_signed_imm14(val)	signed_imm_check(val, 14)
#define is_signed_imm16(val)	signed_imm_check(val, 16)
#define is_signed_imm26(val)	signed_imm_check(val, 26)
#define is_signed_imm32(val)	signed_imm_check(val, 32)
#define is_signed_imm52(val)	signed_imm_check(val, 52)
#define is_unsigned_imm12(val)	unsigned_imm_check(val, 12)

static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * LoongArch branch instructions, however, encode the offset
	 * relative to the branch itself, so we must subtract 1 from
	 * the instruction offset.
	 */
	return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int from = ctx->idx;
	int to = ctx->epilogue_offset;

	return (to - from);
}

/* Zero-extend 32 bits into 64 bits */
static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, lu32id, reg, 0);
}

/* Sign-extend 32 bits into 64 bits */
static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, addiw, reg, reg, 0);
}

static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;

	/* or rd, $zero, $zero */
	if (imm == 0) {
		emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO);
		return;
	}

	/* addiw rd, $zero, imm_11_0 */
	if (is_signed_imm12(imm)) {
		emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* ori rd, $zero, imm_11_0 */
	if (is_unsigned_imm12(imm)) {
		emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* lu52id rd, $zero, imm_63_52 */
	imm_63_52 = (imm >> 52) & 0xfff;
	imm_51_0 = imm & 0xfffffffffffff;
	if (imm_63_52 != 0 && imm_51_0 == 0) {
		emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52);
		return;
	}

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (imm >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = imm & 0xfff;
	if (imm_11_0 != 0)
		emit_insn(ctx, ori, rd, rd, imm_11_0);

	if (!is_signed_imm32(imm)) {
		if (imm_51_0 != 0) {
			/*
			 * If bit[51:31] is all 0 or all 1,
			 * bit[51:32] has already been sign-extended by lu12iw,
			 * so there is no need for a separate lu32id fill.
			 */
			imm_51_31 = (imm >> 31) & 0x1fffff;
			if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
				/* lu32id rd, imm_51_32 */
				imm_51_32 = (imm >> 32) & 0xfffff;
				emit_insn(ctx, lu32id, rd, imm_51_32);
			}
		}

		/* lu52id rd, rd, imm_63_52 */
		if (!is_signed_imm52(imm))
			emit_insn(ctx, lu52id, rd, rd, imm_63_52);
	}

zext:
	emit_zext_32(ctx, rd, is32);
}
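
/*
 * For illustration (the constant below is an arbitrary example, not taken
 * from the code above): move_imm() would build imm = 0x8000000080000800,
 * which needs all four steps, as:
 *
 *	lu12iw rd, 0x80000		rd = 0xffffffff80000000
 *	ori    rd, rd, 0x800		rd = 0xffffffff80000800
 *	lu32id rd, 0x00000		rd = 0x0000000080000800
 *	lu52id rd, rd, 0x800		rd = 0x8000000080000800
 */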

static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd,
			    enum loongarch_gpr rj)
{
	emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO);
}

static inline int invert_jmp_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JNE:
	case BPF_JSET:
		return BPF_JEQ;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSLE:
		return BPF_JSGT;
	}
	return -1;
}

static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	switch (cond) {
	case BPF_JEQ:
		/* PC += jmp_offset if rj == rd */
		emit_insn(ctx, beq, rj, rd, jmp_offset);
		return;
	case BPF_JNE:
	case BPF_JSET:
		/* PC += jmp_offset if rj != rd */
		emit_insn(ctx, bne, rj, rd, jmp_offset);
		return;
	case BPF_JGT:
		/* PC += jmp_offset if rj > rd (unsigned) */
		emit_insn(ctx, bltu, rd, rj, jmp_offset);
		return;
	case BPF_JLT:
		/* PC += jmp_offset if rj < rd (unsigned) */
		emit_insn(ctx, bltu, rj, rd, jmp_offset);
		return;
	case BPF_JGE:
		/* PC += jmp_offset if rj >= rd (unsigned) */
		emit_insn(ctx, bgeu, rj, rd, jmp_offset);
		return;
	case BPF_JLE:
		/* PC += jmp_offset if rj <= rd (unsigned) */
		emit_insn(ctx, bgeu, rd, rj, jmp_offset);
		return;
	case BPF_JSGT:
		/* PC += jmp_offset if rj > rd (signed) */
		emit_insn(ctx, blt, rd, rj, jmp_offset);
		return;
	case BPF_JSLT:
		/* PC += jmp_offset if rj < rd (signed) */
		emit_insn(ctx, blt, rj, rd, jmp_offset);
		return;
	case BPF_JSGE:
		/* PC += jmp_offset if rj >= rd (signed) */
		emit_insn(ctx, bge, rj, rd, jmp_offset);
		return;
	case BPF_JSLE:
		/* PC += jmp_offset if rj <= rd (signed) */
		emit_insn(ctx, bge, rd, rj, jmp_offset);
		return;
	}
}

static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	cond = invert_jmp_cond(cond);
	cond_jmp_offset(ctx, cond, rj, rd, 2);
	emit_insn(ctx, b, jmp_offset);
}
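
/*
 * For example, a BPF_JEQ whose target is out of 16-bit range comes out of
 * cond_jmp_offs26() as the pair below: the inverted condition only hops
 * over the wide unconditional branch.
 *
 *	bne	rj, rd, 2		skip the b when rj != rd
 *	b	jmp_offset		26-bit PC-relative jump to the target
 */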

static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset)
{
	emit_insn(ctx, b, jmp_offset);
}

static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				enum loongarch_gpr rd, int jmp_offset)
{
	/*
	 * A large PC-relative jump offset may overflow the immediate field of
	 * the native conditional branch instruction and would then have to be
	 * converted into an absolute jump, which is a particularly nasty
	 * sequence. For now, use cond_jmp_offs26() directly to keep things
	 * simple. Far branching could be supported in the future, but branch
	 * relaxation takes more than two passes to converge and the resulting
	 * code is hard to follow; it is not clear that it would be worth the
	 * extra pain. Leave it as it is for readability.
	 */
	if (is_signed_imm26(jmp_offset)) {
		cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
{
	if (is_signed_imm26(jmp_offset)) {
		uncond_jmp_offs26(ctx, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				    enum loongarch_gpr rd, int jmp_offset)
{
	if (is_signed_imm16(jmp_offset)) {
		cond_jmp_offset(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}
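
/*
 * Usage sketch (the caller below is assumed, and the variable names are
 * illustrative; this header only provides the helpers): the per-instruction
 * builder computes the branch distance in units of native instructions with
 * bpf2la_offset() and lets emit_cond_jmp() reject an offset that does not
 * fit in the 26-bit immediate:
 *
 *	jmp_offset = bpf2la_offset(i, off, ctx);
 *	if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
 *		return -E2BIG;
 */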