/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;		/* index of the next native instruction slot */
	unsigned int flags;
	unsigned int epilogue_offset;	/* native instruction index of the epilogue */
	u32 *offset;			/* map from BPF insn index to native insn offset */
	int num_exentries;		/* number of exception table entries */
	union loongarch_instruction *image;	/* writable JIT image (NULL during the sizing pass) */
	union loongarch_instruction *ro_image;	/* read-only executable copy of the image */
	u32 stack_size;			/* size of the stack frame in bytes */
};

struct jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

static inline void emit_nop(union loongarch_instruction *insn)
{
	insn->word = INSN_NOP;
}

#define emit_insn(ctx, func, ...)						\
do {										\
	if (ctx->image != NULL) {						\
		union loongarch_instruction *insn = &ctx->image[ctx->idx];	\
		emit_##func(insn, ##__VA_ARGS__);				\
	}									\
	ctx->idx++;								\
} while (0)
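
/*
 * The JIT makes two passes: a sizing pass with ctx->image == NULL, where
 * emit_insn() only advances ctx->idx to count instructions, and an
 * emission pass that actually writes the words. For example,
 *
 *	emit_insn(ctx, addiw, rd, rj, 0);
 *
 * expands to emit_addiw(&ctx->image[ctx->idx], rd, rj, 0) followed by
 * ctx->idx++; the emit_* encoders come from <asm/inst.h>.
 */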

#define is_signed_imm12(val)	signed_imm_check(val, 12)
#define is_signed_imm14(val)	signed_imm_check(val, 14)
#define is_signed_imm16(val)	signed_imm_check(val, 16)
#define is_signed_imm26(val)	signed_imm_check(val, 26)
#define is_signed_imm32(val)	signed_imm_check(val, 32)
#define is_signed_imm52(val)	signed_imm_check(val, 52)
#define is_unsigned_imm12(val)	unsigned_imm_check(val, 12)
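
/*
 * For example, is_signed_imm12() accepts -2048..2047 and
 * is_unsigned_imm12() accepts 0..4095, matching the si12/ui12
 * immediate fields of instructions such as addi.w and ori.
 */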

static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * LoongArch branch instructions, on the other hand, encode the
	 * offset relative to the branch itself, so we must subtract 1
	 * from the native instruction offset.
	 */
	return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
}
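
/*
 * Worked example with made-up offsets: for a branch in BPF insn 5 with
 * off = 3, bpf_insn is bumped to 6; if ctx->offset[6] = 20 and
 * ctx->offset[9] = 50, the branch itself sits at native index 19, so
 * the emitted offset is 50 - (20 - 1) = 31 instructions.
 */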

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int from = ctx->idx;
	int to = ctx->epilogue_offset;

	return (to - from);
}

/* Zero-extend 32 bits into 64 bits */
static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

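	/* lu32id with immediate 0 overwrites bits [63:32] of reg with zeros */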
	emit_insn(ctx, lu32id, reg, 0);
}

/* Sign-extend 32 bits into 64 bits */
static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

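	/* addiw reg, reg, 0 sign-extends bit 31 of reg into bits [63:32] */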
	emit_insn(ctx, addiw, reg, reg, 0);
}

/*
 * Emit the proper extension according to ABI requirements.
 * Note that a value of size `size` must already reside in register `reg`.
 */
static inline void emit_abi_ext(struct jit_ctx *ctx, int reg, u8 size, bool sign)
{
	/* The ABI requires unsigned char/short to be zero-extended */
	if (!sign && (size == 1 || size == 2))
		return;

	switch (size) {
	case 1:
		emit_insn(ctx, extwb, reg, reg);
		break;
	case 2:
		emit_insn(ctx, extwh, reg, reg);
		break;
	case 4:
		emit_insn(ctx, addiw, reg, reg, 0);
		break;
	case 8:
		break;
	default:
		pr_warn("bpf_jit: invalid size %d for extension\n", size);
	}
}
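
/*
 * For instance, a signed byte already in reg is widened with ext.w.b,
 * a halfword with ext.w.h, and a 32-bit value with addi.w reg, reg, 0
 * (the LoongArch ABI keeps 32-bit values sign-extended in registers);
 * unsigned 8/16-bit values are left untouched since the ABI already
 * has them zero-extended.
 */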

static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
{
	u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (addr >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = addr & 0xfff;
	emit_insn(ctx, ori, rd, rd, imm_11_0);

	/* lu32id rd, imm_51_32 */
	imm_51_32 = (addr >> 32) & 0xfffff;
	emit_insn(ctx, lu32id, rd, imm_51_32);

	/* lu52id rd, rd, imm_63_52 */
	imm_63_52 = (addr >> 52) & 0xfff;
	emit_insn(ctx, lu52id, rd, rd, imm_63_52);
}
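
/*
 * Unlike move_imm() below, move_addr() always emits the same four
 * instructions, so the slot has a fixed size and (presumably) can be
 * patched with a different address later. Sketch with a made-up
 * address: for addr = 0x123456789abcdef0 this emits
 *
 *	lu12iw	rd, 0x9abcd
 *	ori	rd, rd, 0xef0
 *	lu32id	rd, 0x45678
 *	lu52id	rd, rd, 0x123
 */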

static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;

	/* or rd, $zero, $zero */
	if (imm == 0) {
		emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO);
		return;
	}

	/* addiw rd, $zero, imm_11_0 */
	if (is_signed_imm12(imm)) {
		emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* ori rd, $zero, imm_11_0 */
	if (is_unsigned_imm12(imm)) {
		emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* lu52id rd, $zero, imm_63_52 */
	imm_63_52 = (imm >> 52) & 0xfff;
	imm_51_0 = imm & 0xfffffffffffff;
	if (imm_63_52 != 0 && imm_51_0 == 0) {
		emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52);
		return;
	}

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (imm >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = imm & 0xfff;
	if (imm_11_0 != 0)
		emit_insn(ctx, ori, rd, rd, imm_11_0);

	if (!is_signed_imm32(imm)) {
		if (imm_51_0 != 0) {
			/*
			 * If bits [51:31] are all 0 or all 1, then bits
			 * [51:32] already hold the sign extension produced
			 * by lu12iw, and there is no need to emit lu32id
			 * to fill them in.
			 */
			imm_51_31 = (imm >> 31) & 0x1fffff;
			if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
				/* lu32id rd, imm_51_32 */
				imm_51_32 = (imm >> 32) & 0xfffff;
				emit_insn(ctx, lu32id, rd, imm_51_32);
			}
		}

		/* lu52id rd, rd, imm_63_52 */
		if (!is_signed_imm52(imm))
			emit_insn(ctx, lu52id, rd, rd, imm_63_52);
	}

zext:
	emit_zext_32(ctx, rd, is32);
}
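
/*
 * A few sketched cases: imm = -1 fits si12, so a single
 * "addiw rd, $zero, -1" suffices; imm = 0x12345678 becomes
 * "lu12iw rd, 0x12345" plus "ori rd, rd, 0x678"; lu32id/lu52id are
 * only emitted for constants that need the upper 32 bits.
 */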

static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd,
			    enum loongarch_gpr rj)
{
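	/* "or rd, rj, $zero" is the canonical register-to-register move on LoongArch */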
	emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO);
}

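/*
 * Return the inverse of a BPF branch condition, or -1 if there is none.
 * BPF_JSET shares the BPF_JNE rows here and in cond_jmp_offset() below
 * because the JIT is expected to have ANDed the two operands into a
 * scratch register first, which reduces JSET to a compare against zero.
 */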
static inline int invert_jmp_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JNE:
	case BPF_JSET:
		return BPF_JEQ;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSLE:
		return BPF_JSGT;
	}
	return -1;
}

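/*
 * LoongArch only provides blt/bge/bltu/bgeu (plus beq/bne), so the
 * ">" and "<=" conditions below are formed by swapping the rj/rd
 * operands of the "<" and ">=" instructions.
 */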
static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	switch (cond) {
	case BPF_JEQ:
		/* PC += jmp_offset if rj == rd */
		emit_insn(ctx, beq, rj, rd, jmp_offset);
		return;
	case BPF_JNE:
	case BPF_JSET:
		/* PC += jmp_offset if rj != rd */
		emit_insn(ctx, bne, rj, rd, jmp_offset);
		return;
	case BPF_JGT:
		/* PC += jmp_offset if rj > rd (unsigned) */
		emit_insn(ctx, bltu, rd, rj, jmp_offset);
		return;
	case BPF_JLT:
		/* PC += jmp_offset if rj < rd (unsigned) */
		emit_insn(ctx, bltu, rj, rd, jmp_offset);
		return;
	case BPF_JGE:
		/* PC += jmp_offset if rj >= rd (unsigned) */
		emit_insn(ctx, bgeu, rj, rd, jmp_offset);
		return;
	case BPF_JLE:
		/* PC += jmp_offset if rj <= rd (unsigned) */
		emit_insn(ctx, bgeu, rd, rj, jmp_offset);
		return;
	case BPF_JSGT:
		/* PC += jmp_offset if rj > rd (signed) */
		emit_insn(ctx, blt, rd, rj, jmp_offset);
		return;
	case BPF_JSLT:
		/* PC += jmp_offset if rj < rd (signed) */
		emit_insn(ctx, blt, rj, rd, jmp_offset);
		return;
	case BPF_JSGE:
		/* PC += jmp_offset if rj >= rd (signed) */
		emit_insn(ctx, bge, rj, rd, jmp_offset);
		return;
	case BPF_JSLE:
		/* PC += jmp_offset if rj <= rd (signed) */
		emit_insn(ctx, bge, rd, rj, jmp_offset);
		return;
	}
}

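/*
 * Synthesize a conditional branch with a 26-bit range: invert the
 * condition and hop over an unconditional b that carries the real
 * target. For BPF_JEQ, for example, this emits:
 *
 *	bne	rj, rd, 2	# skip the next instruction
 *	b	jmp_offset
 */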
static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	cond = invert_jmp_cond(cond);
	cond_jmp_offset(ctx, cond, rj, rd, 2);
	emit_insn(ctx, b, jmp_offset);
}

static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset)
{
	emit_insn(ctx, b, jmp_offset);
}

static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				enum loongarch_gpr rd, int jmp_offset)
{
	/*
	 * A large PC-relative jump offset may overflow the immediate field
	 * of the native conditional branch instruction, which would force a
	 * fallback to an absolute jump; that jump sequence is particularly
	 * nasty. For now, use cond_jmp_offs26() directly to keep things
	 * simple. We could add support for far branching in the future, but
	 * branch relaxation takes more than two passes to converge and the
	 * resulting code is hard to follow, so it is not clear that it would
	 * be worth the extra pain. For the sake of readability, leave it as
	 * it is.
	 */
	if (is_signed_imm26(jmp_offset)) {
		cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
{
	if (is_signed_imm26(jmp_offset)) {
		uncond_jmp_offs26(ctx, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				    enum loongarch_gpr rd, int jmp_offset)
{
	if (is_signed_imm16(jmp_offset)) {
		cond_jmp_offset(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
343