/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int flags;
	unsigned int epilogue_offset;
	u32 *offset;
	int num_exentries;
	union loongarch_instruction *image;
	union loongarch_instruction *ro_image;
	u32 stack_size;
	u64 arena_vm_start;
	u64 user_vm_start;
};

struct jit_data {
	struct bpf_binary_header *header;
	struct bpf_binary_header *ro_header;
	struct jit_ctx ctx;
};

static inline void emit_nop(union loongarch_instruction *insn)
{
	insn->word = INSN_NOP;
}

#define emit_insn(ctx, func, ...)						\
do {										\
	if (ctx->image != NULL) {						\
		union loongarch_instruction *insn = &ctx->image[ctx->idx];	\
		emit_##func(insn, ##__VA_ARGS__);				\
	}									\
	ctx->idx++;								\
} while (0)

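/*
 * Illustrative note: the JIT invokes emit_insn() in two passes. In the
 * sizing pass ctx->image is NULL, so the macro only advances ctx->idx to
 * measure the program; in the emission pass it also encodes the
 * instruction into the image, e.g.:
 *
 *	emit_insn(ctx, ori, LOONGARCH_GPR_A0, LOONGARCH_GPR_A0, 1);
 */
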
#define is_signed_imm12(val)	signed_imm_check(val, 12)
#define is_signed_imm14(val)	signed_imm_check(val, 14)
#define is_signed_imm16(val)	signed_imm_check(val, 16)
#define is_signed_imm26(val)	signed_imm_check(val, 26)
#define is_signed_imm32(val)	signed_imm_check(val, 32)
#define is_signed_imm52(val)	signed_imm_check(val, 52)
#define is_unsigned_imm12(val)	unsigned_imm_check(val, 12)

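/*
 * For illustration: signed_imm_check()/unsigned_imm_check() come from
 * asm/inst.h and test whether a value fits in the given bit width, so
 * is_signed_imm12(val) accepts -2048..2047 and is_unsigned_imm12(val)
 * accepts 0..4095.
 */
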
static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * LoongArch branch instructions, however, encode the offset
	 * relative to the branch itself, so we must subtract 1 from
	 * the instruction offset.
	 */
	return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
}

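/*
 * Worked example with made-up indexes: say BPF insn 5 is a jump with
 * off = 2, its branch is the last native instruction of its sequence
 * and sits at native index 10 (so ctx->offset[6] == 11), and the
 * target BPF insn 8 starts at native index 14 (ctx->offset[8] == 14).
 * The result is 14 - (11 - 1) = 4 instructions, counted from the
 * branch itself to the target.
 */
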
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int from = ctx->idx;
	int to = ctx->epilogue_offset;

	return (to - from);
}

/* Zero-extend 32 bits into 64 bits */
static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	/* lu32i.d reg, 0 clears bits [63:32] and keeps the low word */
	emit_insn(ctx, lu32id, reg, 0);
}

/* Sign-extend 32 bits into 64 bits */
static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	/* addi.w writes its 32-bit result sign-extended to 64 bits */
	emit_insn(ctx, addiw, reg, reg, 0);
}

/*
 * Emit the proper extension according to the ABI requirements.
 * Note that this requires a value of size `size` to already reside
 * in register `reg`.
 */
static inline void emit_abi_ext(struct jit_ctx *ctx, int reg, u8 size, bool sign)
{
	/* The ABI requires unsigned char/short to be zero-extended */
	if (!sign && (size == 1 || size == 2))
		return;

	switch (size) {
	case 1:
		emit_insn(ctx, extwb, reg, reg);
		break;
	case 2:
		emit_insn(ctx, extwh, reg, reg);
		break;
	case 4:
		emit_insn(ctx, addiw, reg, reg, 0);
		break;
	case 8:
		break;
	default:
		pr_warn("bpf_jit: invalid size %d for extension\n", size);
	}
}

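/*
 * Hypothetical usage sketch: for a u8 value already in $a0,
 * emit_abi_ext(ctx, LOONGARCH_GPR_A0, 1, false) emits nothing, while
 * emit_abi_ext(ctx, LOONGARCH_GPR_A0, 4, false) still emits
 * "addi.w $a0, $a0, 0", since the LP64 ABI keeps 32-bit values
 * sign-extended in registers regardless of signedness.
 */
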
static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
{
	u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (addr >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = addr & 0xfff;
	emit_insn(ctx, ori, rd, rd, imm_11_0);

	/* lu32id rd, imm_51_32 */
	imm_51_32 = (addr >> 32) & 0xfffff;
	emit_insn(ctx, lu32id, rd, imm_51_32);

	/* lu52id rd, rd, imm_63_52 */
	imm_63_52 = (addr >> 52) & 0xfff;
	emit_insn(ctx, lu52id, rd, rd, imm_63_52);
}

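/*
 * Worked example with a made-up address: for addr = 0x9000000012345678
 * the sequence is always the same four instructions:
 *
 *	lu12i.w rd, 0x12345	// rd = 0x0000000012345000
 *	ori     rd, rd, 0x678	// rd = 0x0000000012345678
 *	lu32i.d rd, 0x00000	// rd = 0x0000000012345678
 *	lu52i.d rd, rd, 0x900	// rd = 0x9000000012345678
 *
 * Unlike move_imm() below, the length never varies with the value.
 */
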
static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;

	/* or rd, $zero, $zero */
	if (imm == 0) {
		emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO);
		return;
	}

	/* addiw rd, $zero, imm_11_0 */
	if (is_signed_imm12(imm)) {
		emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* ori rd, $zero, imm_11_0 */
	if (is_unsigned_imm12(imm)) {
		emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* lu52id rd, $zero, imm_63_52 */
	imm_63_52 = (imm >> 52) & 0xfff;
	imm_51_0 = imm & 0xfffffffffffff;
	if (imm_63_52 != 0 && imm_51_0 == 0) {
		emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52);
		return;
	}

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (imm >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = imm & 0xfff;
	if (imm_11_0 != 0)
		emit_insn(ctx, ori, rd, rd, imm_11_0);

	if (!is_signed_imm32(imm)) {
		if (imm_51_0 != 0) {
			/*
			 * If bits [51:31] are all 0 or all 1, then
			 * bits [51:32] already hold the sign extension
			 * produced by lu12iw, and there is no need to
			 * emit lu32id to fill them again.
			 */
			imm_51_31 = (imm >> 31) & 0x1fffff;
			if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
				/* lu32id rd, imm_51_32 */
				imm_51_32 = (imm >> 32) & 0xfffff;
				emit_insn(ctx, lu32id, rd, imm_51_32);
			}
		}

		/* lu52id rd, rd, imm_63_52 */
		if (!is_signed_imm52(imm))
			emit_insn(ctx, lu52id, rd, rd, imm_63_52);
	}

zext:
	emit_zext_32(ctx, rd, is32);
}

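/*
 * Worked example with a made-up immediate: imm = 0x123456789abc (with
 * is32 == false) takes the generic path and emits only
 *
 *	lu12i.w rd, 0x56789	// rd = 0x0000000056789000
 *	ori     rd, rd, 0xabc	// rd = 0x0000000056789abc
 *	lu32i.d rd, 0x01234	// rd = 0x0000123456789abc
 *
 * lu52i.d is skipped because the value fits in a signed 52-bit
 * immediate, while a small immediate such as 0x7ff needs only a
 * single addi.w.
 */
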
static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd,
			    enum loongarch_gpr rj)
{
	emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO);
}

static inline int invert_jmp_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JNE:
	case BPF_JSET:
		return BPF_JEQ;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSLE:
		return BPF_JSGT;
	}
	return -1;
}

static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	switch (cond) {
	case BPF_JEQ:
		/* PC += jmp_offset if rj == rd */
		emit_insn(ctx, beq, rj, rd, jmp_offset);
		return;
	case BPF_JNE:
	case BPF_JSET:
		/*
		 * PC += jmp_offset if rj != rd
		 * (for BPF_JSET the caller tests the AND of the operands,
		 * so the set-bits check reduces to an inequality check)
		 */
		emit_insn(ctx, bne, rj, rd, jmp_offset);
		return;
	case BPF_JGT:
		/* PC += jmp_offset if rj > rd (unsigned) */
		emit_insn(ctx, bltu, rd, rj, jmp_offset);
		return;
	case BPF_JLT:
		/* PC += jmp_offset if rj < rd (unsigned) */
		emit_insn(ctx, bltu, rj, rd, jmp_offset);
		return;
	case BPF_JGE:
		/* PC += jmp_offset if rj >= rd (unsigned) */
		emit_insn(ctx, bgeu, rj, rd, jmp_offset);
		return;
	case BPF_JLE:
		/* PC += jmp_offset if rj <= rd (unsigned) */
		emit_insn(ctx, bgeu, rd, rj, jmp_offset);
		return;
	case BPF_JSGT:
		/* PC += jmp_offset if rj > rd (signed) */
		emit_insn(ctx, blt, rd, rj, jmp_offset);
		return;
	case BPF_JSLT:
		/* PC += jmp_offset if rj < rd (signed) */
		emit_insn(ctx, blt, rj, rd, jmp_offset);
		return;
	case BPF_JSGE:
		/* PC += jmp_offset if rj >= rd (signed) */
		emit_insn(ctx, bge, rj, rd, jmp_offset);
		return;
	case BPF_JSLE:
		/* PC += jmp_offset if rj <= rd (signed) */
		emit_insn(ctx, bge, rd, rj, jmp_offset);
		return;
	}
}

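/*
 * LoongArch has no bgt/bgtu/ble/bleu instructions, so the ">" and "<="
 * cases above swap the rj/rd operands of blt/bltu and bge/bgeu instead.
 */
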
/*
 * Emit a conditional branch with a 26-bit range by inverting the
 * condition into a short branch that skips over an unconditional b.
 */
static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	cond = invert_jmp_cond(cond);
	cond_jmp_offset(ctx, cond, rj, rd, 2);
	emit_insn(ctx, b, jmp_offset);
}

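/*
 * For example, "beq rj, rd, L" with an offset too large for beq's
 * 16-bit immediate becomes
 *
 *	bne rj, rd, +2	// skip the following b when rj != rd
 *	b   L		// 26-bit PC-relative jump
 */
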
static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset)
{
	emit_insn(ctx, b, jmp_offset);
}

static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				enum loongarch_gpr rd, int jmp_offset)
{
	/*
	 * A large PC-relative jump offset may overflow the immediate field
	 * of the native conditional branch instruction, forcing a fall back
	 * to an absolute jump, and that jump sequence is particularly nasty.
	 * For now, use cond_jmp_offs26() directly to keep things simple.
	 * We could add support for far branching in the future, but branch
	 * relaxation needs more than two passes to converge and the code
	 * gets complex; it is not clear that it is necessary or worth the
	 * extra pain. So leave it as it is for the sake of readability.
	 */
	if (is_signed_imm26(jmp_offset)) {
		cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

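/*
 * Typical call-site shape (a sketch, not copied from bpf_jit.c):
 *
 *	jmp_offset = bpf2la_offset(i, off, ctx);
 *	if (emit_cond_jmp(ctx, cond, rj, rd, jmp_offset) < 0)
 *		return -EINVAL;
 */
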
static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
{
	if (is_signed_imm26(jmp_offset)) {
		uncond_jmp_offs26(ctx, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				    enum loongarch_gpr rd, int jmp_offset)
{
	if (is_signed_imm16(jmp_offset)) {
		cond_jmp_offset(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}