/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;		/* index of the next native insn slot */
	unsigned int flags;
	unsigned int epilogue_offset;	/* native insn offset of the epilogue */
	u32 *offset;			/* map of BPF insn index to native insn offset */
	int num_exentries;
	union loongarch_instruction *image;	/* NULL during the sizing pass */
	u32 stack_size;
};

struct jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

#define emit_insn(ctx, func, ...)						\
do {										\
	if (ctx->image != NULL) {						\
		union loongarch_instruction *insn = &ctx->image[ctx->idx];	\
		emit_##func(insn, ##__VA_ARGS__);				\
	}									\
	ctx->idx++;								\
} while (0)

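/*
 * Usage sketch (illustrative): the JIT walks the program twice with the
 * same emit_* calls. On the first pass ctx->image is NULL, so emit_insn()
 * only advances ctx->idx to size the image; on the second pass the
 * instruction is actually encoded into the image, e.g.:
 *
 *	emit_insn(ctx, addid, rd, rj, imm);	// rd = rj + imm
 */
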
#define is_signed_imm12(val)	signed_imm_check(val, 12)
#define is_signed_imm14(val)	signed_imm_check(val, 14)
#define is_signed_imm16(val)	signed_imm_check(val, 16)
#define is_signed_imm26(val)	signed_imm_check(val, 26)
#define is_signed_imm32(val)	signed_imm_check(val, 32)
#define is_signed_imm52(val)	signed_imm_check(val, 52)
#define is_unsigned_imm12(val)	unsigned_imm_check(val, 12)

static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * LoongArch branch instructions encode the offset from the
	 * branch itself, so we must subtract 1 from the instruction
	 * offset.
	 */
	return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
}

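/*
 * Worked example (illustrative, with made-up offsets): for a branch in
 * BPF insn i with off = 2, suppose ctx->offset[i + 1] = 10 (the native
 * offset right after the branch) and ctx->offset[i + 3] = 17. The result
 * is 17 - (10 - 1) = 8 native instructions, measured from the branch
 * instruction itself rather than from its successor.
 */
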
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int from = ctx->idx;
	int to = ctx->epilogue_offset;

	return (to - from);
}

/* Zero-extend 32 bits into 64 bits */
static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, lu32id, reg, 0);
}

/* Sign-extend 32 bits into 64 bits */
static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, addiw, reg, reg, 0);
}

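/*
 * Mechanism note: lu32i.d writes its 20-bit immediate, sign-extended,
 * into bits 63:32, so "lu32id reg, 0" clears the upper word and thereby
 * zero-extends the low 32 bits. addi.w computes a 32-bit sum and
 * sign-extends the result, so "addiw reg, reg, 0" sign-extends bit 31
 * into bits 63:32.
 */
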
static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
{
	u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (addr >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = addr & 0xfff;
	emit_insn(ctx, ori, rd, rd, imm_11_0);

	/* lu32id rd, imm_51_32 */
	imm_51_32 = (addr >> 32) & 0xfffff;
	emit_insn(ctx, lu32id, rd, imm_51_32);

	/* lu52id rd, rd, imm_63_52 */
	imm_63_52 = (addr >> 52) & 0xfff;
	emit_insn(ctx, lu52id, rd, rd, imm_63_52);
}

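/*
 * Worked example (illustrative address): addr = 0x90000000abcd1234
 * emits the fixed 4-insn sequence
 *
 *	lu12iw rd, 0xabcd1	// rd = 0xffffffffabcd1000 (sign-extended)
 *	ori    rd, rd, 0x234	// rd = 0xffffffffabcd1234
 *	lu32id rd, 0x00000	// rd = 0x00000000abcd1234
 *	lu52id rd, rd, 0x900	// rd = 0x90000000abcd1234
 *
 * The length is always 4 instructions regardless of the address value,
 * unlike move_imm() below, which minimizes the instruction count.
 */
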
static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;

	/* or rd, $zero, $zero */
	if (imm == 0) {
		emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO);
		return;
	}

	/* addiw rd, $zero, imm_11_0 */
	if (is_signed_imm12(imm)) {
		emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* ori rd, $zero, imm_11_0 */
	if (is_unsigned_imm12(imm)) {
		emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* lu52id rd, $zero, imm_63_52 */
	imm_63_52 = (imm >> 52) & 0xfff;
	imm_51_0 = imm & 0xfffffffffffff;
	if (imm_63_52 != 0 && imm_51_0 == 0) {
		emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52);
		return;
	}

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (imm >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = imm & 0xfff;
	if (imm_11_0 != 0)
		emit_insn(ctx, ori, rd, rd, imm_11_0);

	if (!is_signed_imm32(imm)) {
		if (imm_51_0 != 0) {
			/*
			 * If bits[51:31] are all 0 or all 1, then lu12iw has
			 * already sign-extended the correct value into
			 * bits[51:32], and lu32id can be skipped.
			 */
			imm_51_31 = (imm >> 31) & 0x1fffff;
			if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
				/* lu32id rd, imm_51_32 */
				imm_51_32 = (imm >> 32) & 0xfffff;
				emit_insn(ctx, lu32id, rd, imm_51_32);
			}
		}

		/* lu52id rd, rd, imm_63_52 */
		if (!is_signed_imm52(imm))
			emit_insn(ctx, lu52id, rd, rd, imm_63_52);
	}

zext:
	emit_zext_32(ctx, rd, is32);
}

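/*
 * Worked examples (illustrative values):
 *
 *	move_imm(ctx, rd, -1, false)	 -> addiw rd, $zero, -1
 *	move_imm(ctx, rd, 0x12345678, false)
 *					 -> lu12iw rd, 0x12345
 *					    ori    rd, rd, 0x678
 *
 * The second value fits in a signed 32-bit immediate, so neither lu32id
 * nor lu52id is needed; the sequence length scales with how much of the
 * constant is significant.
 */
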
static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd,
			    enum loongarch_gpr rj)
{
	/* or rd, rj, $zero (register move) */
	emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO);
}

static inline int invert_jmp_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JNE:
	case BPF_JSET:
		return BPF_JEQ;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSLE:
		return BPF_JSGT;
	}
	return -1;
}

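/*
 * Note (inferred from the callers, which live outside this header):
 * BPF_JSET shares BPF_JNE's handling because the JIT is expected to
 * reduce "if (rj & rd)" to a test of an AND result against zero first,
 * after which "set" becomes "not equal" and its inverse becomes "equal".
 */
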
static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	switch (cond) {
	case BPF_JEQ:
		/* PC += jmp_offset if rj == rd */
		emit_insn(ctx, beq, rj, rd, jmp_offset);
		return;
	case BPF_JNE:
	case BPF_JSET:
		/* PC += jmp_offset if rj != rd */
		emit_insn(ctx, bne, rj, rd, jmp_offset);
		return;
	case BPF_JGT:
		/* PC += jmp_offset if rj > rd (unsigned) */
		emit_insn(ctx, bltu, rd, rj, jmp_offset);
		return;
	case BPF_JLT:
		/* PC += jmp_offset if rj < rd (unsigned) */
		emit_insn(ctx, bltu, rj, rd, jmp_offset);
		return;
	case BPF_JGE:
		/* PC += jmp_offset if rj >= rd (unsigned) */
		emit_insn(ctx, bgeu, rj, rd, jmp_offset);
		return;
	case BPF_JLE:
		/* PC += jmp_offset if rj <= rd (unsigned) */
		emit_insn(ctx, bgeu, rd, rj, jmp_offset);
		return;
	case BPF_JSGT:
		/* PC += jmp_offset if rj > rd (signed) */
		emit_insn(ctx, blt, rd, rj, jmp_offset);
		return;
	case BPF_JSLT:
		/* PC += jmp_offset if rj < rd (signed) */
		emit_insn(ctx, blt, rj, rd, jmp_offset);
		return;
	case BPF_JSGE:
		/* PC += jmp_offset if rj >= rd (signed) */
		emit_insn(ctx, bge, rj, rd, jmp_offset);
		return;
	case BPF_JSLE:
		/* PC += jmp_offset if rj <= rd (signed) */
		emit_insn(ctx, bge, rd, rj, jmp_offset);
		return;
	}
}

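/*
 * Design note: LoongArch has no "greater-than" branch instructions, so
 * the strictly-greater cases above reuse blt/bltu with the operands
 * swapped, e.g. for BPF_JGT "rj > rd" is emitted as "bltu rd, rj"
 * (rd < rj); likewise BPF_JLE becomes "bgeu rd, rj".
 */
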
static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	cond = invert_jmp_cond(cond);
	cond_jmp_offset(ctx, cond, rj, rd, 2);
	emit_insn(ctx, b, jmp_offset);
}

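/*
 * Sketch of the emitted sequence (illustrative, for cond = BPF_JEQ):
 *
 *	bne rj, rd, +2	// inverted test hops over the far jump
 *	b   jmp_offset	// 26-bit PC-relative jump to the real target
 *
 * The conditional branch only ever skips one instruction, so its 16-bit
 * offset field always suffices; the range burden falls on the b insn.
 */
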
static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset)
{
	emit_insn(ctx, b, jmp_offset);
}

static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				enum loongarch_gpr rd, int jmp_offset)
{
	/*
	 * A large PC-relative jump offset may overflow the immediate field
	 * of the native conditional branch instruction, which would force a
	 * conversion to an absolute jump; that jump sequence is particularly
	 * nasty. For now, use cond_jmp_offs26() directly to keep things
	 * simple. We could add support for far branching in the future, but
	 * branch relaxation needs more than two passes to converge and the
	 * resulting code is complex; it is not clear that it would be worth
	 * the extra pain. So leave it as it is for the sake of readability.
	 */
	if (is_signed_imm26(jmp_offset)) {
		cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
{
	if (is_signed_imm26(jmp_offset)) {
		uncond_jmp_offs26(ctx, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				    enum loongarch_gpr rd, int jmp_offset)
{
	if (is_signed_imm16(jmp_offset)) {
		cond_jmp_offset(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

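/*
 * Range note: the conditional branches (beq/bne/blt/bge/bltu/bgeu) carry
 * a signed 16-bit instruction offset (roughly +/-128 KiB of code), while
 * b carries a signed 26-bit one (roughly +/-128 MiB). Tail-call jumps
 * only target labels within the locally emitted tail-call sequence, so
 * the 16-bit form checked by emit_tailcall_jmp() is assumed sufficient.
 */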