/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int flags;
	unsigned int epilogue_offset;
	u32 *offset;
	int num_exentries;
	union loongarch_instruction *image;
	u32 stack_size;
};

struct jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

#define emit_insn(ctx, func, ...)						\
do {										\
	if (ctx->image != NULL) {						\
		union loongarch_instruction *insn = &ctx->image[ctx->idx];	\
		emit_##func(insn, ##__VA_ARGS__);				\
	}									\
	ctx->idx++;								\
} while (0)

#define is_signed_imm12(val)	signed_imm_check(val, 12)
#define is_signed_imm14(val)	signed_imm_check(val, 14)
#define is_signed_imm16(val)	signed_imm_check(val, 16)
#define is_signed_imm26(val)	signed_imm_check(val, 26)
#define is_signed_imm32(val)	signed_imm_check(val, 32)
#define is_signed_imm52(val)	signed_imm_check(val, 52)
#define is_unsigned_imm12(val)	unsigned_imm_check(val, 12)

static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * Whereas LoongArch branch instructions encode the offset
	 * from the branch itself, so we must subtract 1 from the
	 * instruction offset.
	 */
	return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int from = ctx->idx;
	int to = ctx->epilogue_offset;

	return (to - from);
}

/* Zero-extend 32 bits into 64 bits */
static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, lu32id, reg, 0);
}

/* Sign-extend 32 bits into 64 bits */
static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, addiw, reg, reg, 0);
}

static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;

	/* or rd, $zero, $zero */
	if (imm == 0) {
		emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO);
		return;
	}

	/* addiw rd, $zero, imm_11_0 */
	if (is_signed_imm12(imm)) {
		emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* ori rd, $zero, imm_11_0 */
	if (is_unsigned_imm12(imm)) {
		emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* lu52id rd, $zero, imm_63_52 */
	imm_63_52 = (imm >> 52) & 0xfff;
	imm_51_0 = imm & 0xfffffffffffff;
	if (imm_63_52 != 0 && imm_51_0 == 0) {
		emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52);
		return;
	}

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (imm >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = imm & 0xfff;
	if (imm_11_0 != 0)
		emit_insn(ctx, ori, rd, rd, imm_11_0);

	if (!is_signed_imm32(imm)) {
		if (imm_51_0 != 0) {
			/*
			 * If bit[51:31] is all 0 or all 1,
			 * it means bit[51:32] is sign extended by lu12iw,
			 * no need to call lu32id to do a new filled operation.
			 */
			imm_51_31 = (imm >> 31) & 0x1fffff;
			if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
				/* lu32id rd, imm_51_32 */
				imm_51_32 = (imm >> 32) & 0xfffff;
				emit_insn(ctx, lu32id, rd, imm_51_32);
			}
		}

		/* lu52id rd, rd, imm_63_52 */
		if (!is_signed_imm52(imm))
			emit_insn(ctx, lu52id, rd, rd, imm_63_52);
	}

zext:
	emit_zext_32(ctx, rd, is32);
}
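/*
 * Illustrative sketch (not part of the original source): the bit fields
 * move_imm() feeds to lu12iw/ori/lu32id/lu52id when it has to build an
 * arbitrary 64-bit constant. lu12iw sets bit[31:12] and sign-extends
 * bit 31 upward, ori fills bit[11:0], lu32id rewrites bit[51:32] and
 * lu52id rewrites bit[63:52], so the upper fields are only emitted when
 * they differ from what the sign extension already produced. The struct
 * and helper names below are made up for this example.
 *
 * For example, imm = 0x123456789abcdef0 splits into:
 *	imm_11_0  = 0xef0,   imm_31_12 = 0x9abcd,
 *	imm_51_32 = 0x45678, imm_63_52 = 0x123
 */
struct move_imm_fields_example {
	long imm_11_0;		/* ori    rd, rd, imm_11_0	*/
	long imm_31_12;		/* lu12iw rd, imm_31_12		*/
	long imm_51_32;		/* lu32id rd, imm_51_32		*/
	long imm_63_52;		/* lu52id rd, rd, imm_63_52	*/
};

static inline void split_move_imm_example(long imm, struct move_imm_fields_example *f)
{
	f->imm_11_0  = imm & 0xfff;
	f->imm_31_12 = (imm >> 12) & 0xfffff;
	f->imm_51_32 = (imm >> 32) & 0xfffff;
	f->imm_63_52 = (imm >> 52) & 0xfff;
}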
static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd,
			    enum loongarch_gpr rj)
{
	emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO);
}

static inline int invert_jmp_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JNE:
	case BPF_JSET:
		return BPF_JEQ;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSLE:
		return BPF_JSGT;
	}
	return -1;
}

static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	switch (cond) {
	case BPF_JEQ:
		/* PC += jmp_offset if rj == rd */
		emit_insn(ctx, beq, rj, rd, jmp_offset);
		return;
	case BPF_JNE:
	case BPF_JSET:
		/* PC += jmp_offset if rj != rd */
		emit_insn(ctx, bne, rj, rd, jmp_offset);
		return;
	case BPF_JGT:
		/* PC += jmp_offset if rj > rd (unsigned) */
		emit_insn(ctx, bltu, rd, rj, jmp_offset);
		return;
	case BPF_JLT:
		/* PC += jmp_offset if rj < rd (unsigned) */
		emit_insn(ctx, bltu, rj, rd, jmp_offset);
		return;
	case BPF_JGE:
		/* PC += jmp_offset if rj >= rd (unsigned) */
		emit_insn(ctx, bgeu, rj, rd, jmp_offset);
		return;
	case BPF_JLE:
		/* PC += jmp_offset if rj <= rd (unsigned) */
		emit_insn(ctx, bgeu, rd, rj, jmp_offset);
		return;
	case BPF_JSGT:
		/* PC += jmp_offset if rj > rd (signed) */
		emit_insn(ctx, blt, rd, rj, jmp_offset);
		return;
	case BPF_JSLT:
		/* PC += jmp_offset if rj < rd (signed) */
		emit_insn(ctx, blt, rj, rd, jmp_offset);
		return;
	case BPF_JSGE:
		/* PC += jmp_offset if rj >= rd (signed) */
		emit_insn(ctx, bge, rj, rd, jmp_offset);
		return;
	case BPF_JSLE:
		/* PC += jmp_offset if rj <= rd (signed) */
		emit_insn(ctx, bge, rd, rj, jmp_offset);
		return;
	}
}

static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	cond = invert_jmp_cond(cond);
	cond_jmp_offset(ctx, cond, rj, rd, 2);
	emit_insn(ctx, b, jmp_offset);
}

static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset)
{
	emit_insn(ctx, b, jmp_offset);
}
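/*
 * Illustrative sketch (not part of the original source): the far-branch
 * pattern produced by cond_jmp_offs26(). The native conditional branches
 * only reach a signed 16-bit instruction offset, so the condition is
 * inverted and used to hop over an unconditional b, which has a signed
 * 26-bit reach:
 *
 *	<inverted-cond branch>	+2		# skip the b when the original
 *						# condition is false
 *	b			jmp_offset	# taken only when the original
 *						# condition holds
 *
 * The wrapper below is hypothetical and only shows the call pattern for a
 * far "branch if rj > rd (unsigned)".
 */
static inline void emit_far_jgt_example(struct jit_ctx *ctx, enum loongarch_gpr rj,
					enum loongarch_gpr rd, int jmp_offset)
{
	/* jmp_offset is counted in instructions and must fit in 26 signed bits */
	cond_jmp_offs26(ctx, BPF_JGT, rj, rd, jmp_offset);
}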
static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				enum loongarch_gpr rd, int jmp_offset)
{
	/*
	 * A large PC-relative jump offset may overflow the immediate field of
	 * the native conditional branch instruction, forcing a conversion to
	 * an absolute jump instead; that jump sequence is particularly nasty.
	 * For now, use cond_jmp_offs26() directly to keep it simple. Far
	 * branching could be added in the future, but branch relaxation needs
	 * more than two passes to converge and the resulting code is hard to
	 * follow, so it is not clear the extra complexity is worth it. Leave
	 * it as it is for readability now.
	 */
	if (is_signed_imm26(jmp_offset)) {
		cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
{
	if (is_signed_imm26(jmp_offset)) {
		uncond_jmp_offs26(ctx, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				    enum loongarch_gpr rd, int jmp_offset)
{
	if (is_signed_imm16(jmp_offset)) {
		cond_jmp_offset(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}
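/*
 * Illustrative sketch (not part of the original source): how the two-pass
 * scheme baked into emit_insn() is typically driven. While ctx->image is
 * NULL, emit_insn() only advances ctx->idx, so the first pass measures the
 * image size; once ctx->image points at a real buffer, the same sequence
 * of emits fills it in. build_body_example() is a hypothetical stand-in
 * for the real per-instruction build routine.
 */
static inline u32 measure_then_emit_example(struct jit_ctx *ctx,
					    void (*build_body_example)(struct jit_ctx *ctx),
					    union loongarch_instruction *buf)
{
	u32 ninsns;

	/* Pass 1: count instructions without writing anything */
	ctx->image = NULL;
	ctx->idx = 0;
	build_body_example(ctx);
	ninsns = ctx->idx;

	/* Pass 2: emit the same sequence into the real buffer */
	ctx->image = buf;
	ctx->idx = 0;
	build_body_example(ctx);

	return ninsns;
}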