xref: /linux/arch/loongarch/net/bpf_jit.c (revision 015e7b0b0e8e51f7321ec2aafc1d7fc0a8a5536f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * BPF JIT compiler for LoongArch
4  *
5  * Copyright (C) 2022 Loongson Technology Corporation Limited
6  */
7 #include <linux/memory.h>
8 #include "bpf_jit.h"
9 
10 #define LOONGARCH_MAX_REG_ARGS 8
11 
12 #define LOONGARCH_LONG_JUMP_NINSNS 5
13 #define LOONGARCH_LONG_JUMP_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
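/*
 * A long jump is emitted as move_imm() (at most 4 instructions for an
 * arbitrary 64-bit address) followed by a single jirl, hence 5 instructions.
 */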
14 
15 #define LOONGARCH_FENTRY_NINSNS 2
16 #define LOONGARCH_FENTRY_NBYTES (LOONGARCH_FENTRY_NINSNS * 4)
17 #define LOONGARCH_BPF_FENTRY_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
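/*
 * Kernel functions patched by ftrace reserve 2 NOPs at their entry, while
 * JITed BPF programs reserve the full 5-instruction long-jump slot (see the
 * NOPs emitted at the start of build_prologue()).
 */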
18 
19 #define REG_TCC		LOONGARCH_GPR_A6
20 #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (round_up(stack, 16) - 80)
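/*
 * build_prologue() saves ra, fp, s0-s5, tcc and tcc_ptr at the top of the
 * frame: ten 8-byte slots, i.e. 80 bytes. Since the stack size is already
 * 16-byte aligned, round_up(stack, 16) - 80 is the offset of the tcc_ptr
 * slot; e.g. a zero stack_depth gives a stack size of 80 and tcc_ptr at SP + 0.
 */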
21 
22 static const int regmap[] = {
23 	/* return value from in-kernel function, and exit value for eBPF program */
24 	[BPF_REG_0] = LOONGARCH_GPR_A5,
25 	/* arguments from eBPF program to in-kernel function */
26 	[BPF_REG_1] = LOONGARCH_GPR_A0,
27 	[BPF_REG_2] = LOONGARCH_GPR_A1,
28 	[BPF_REG_3] = LOONGARCH_GPR_A2,
29 	[BPF_REG_4] = LOONGARCH_GPR_A3,
30 	[BPF_REG_5] = LOONGARCH_GPR_A4,
31 	/* callee saved registers that in-kernel function will preserve */
32 	[BPF_REG_6] = LOONGARCH_GPR_S0,
33 	[BPF_REG_7] = LOONGARCH_GPR_S1,
34 	[BPF_REG_8] = LOONGARCH_GPR_S2,
35 	[BPF_REG_9] = LOONGARCH_GPR_S3,
36 	/* read-only frame pointer to access stack */
37 	[BPF_REG_FP] = LOONGARCH_GPR_S4,
38 	/* temporary register for blinding constants */
39 	[BPF_REG_AX] = LOONGARCH_GPR_T0,
40 };
41 
42 static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx, int *store_offset)
43 {
44 	const struct bpf_prog *prog = ctx->prog;
45 	const bool is_main_prog = !bpf_is_subprog(prog);
46 
47 	if (is_main_prog) {
48 		/*
49 		 * LOONGARCH_GPR_T3 = MAX_TAIL_CALL_CNT
50 		 * if (REG_TCC > T3)
51 		 *	std REG_TCC -> LOONGARCH_GPR_SP + store_offset
52 		 * else
53 		 *	std REG_TCC -> LOONGARCH_GPR_SP + store_offset
54 		 *	REG_TCC = LOONGARCH_GPR_SP + store_offset
55 		 *
56 		 * std REG_TCC -> LOONGARCH_GPR_SP + store_offset
57 		 *
58 		 * The purpose of this code is to first push the TCC onto the stack,
59 		 * and then push the address of the TCC onto the stack.
60 		 * When bpf2bpf calls and tail calls are used in combination,
61 		 * the value in REG_TCC may be either a count or an address,
62 		 * so these two cases must be distinguished and handled separately.
63 		 */
64 		emit_insn(ctx, addid, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
65 		*store_offset -= sizeof(long);
66 
67 		emit_cond_jmp(ctx, BPF_JGT, REG_TCC, LOONGARCH_GPR_T3, 4);
68 
69 		/*
70 		 * If REG_TCC <= MAX_TAIL_CALL_CNT, the value in REG_TCC is a count,
71 		 * push the tcc onto the stack
72 		 */
73 		emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
74 
75 		/* Put the address of the saved TCC into REG_TCC */
76 		emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
77 
78 		emit_uncond_jmp(ctx, 2);
79 
80 		/*
81 		 * If REG_TCC > MAX_TAIL_CALL_CNT, the value in REG_TCC is an address,
82 		 * push the tcc_ptr onto the stack
83 		 */
84 		emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
85 	} else {
86 		*store_offset -= sizeof(long);
87 		emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
88 	}
89 
90 	/* Push the tcc_ptr onto the stack */
91 	*store_offset -= sizeof(long);
92 	emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
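	/*
	 * Either way, the tcc_ptr slot now holds a pointer to the real
	 * tail-call count, which emit_bpf_tail_call() reloads and
	 * increments through.
	 */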
93 }
94 
95 /*
96  * eBPF prog stack layout:
97  *
98  *                                        high
99  * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
100  *                            |           $ra           |
101  *                            +-------------------------+
102  *                            |           $fp           |
103  *                            +-------------------------+
104  *                            |           $s0           |
105  *                            +-------------------------+
106  *                            |           $s1           |
107  *                            +-------------------------+
108  *                            |           $s2           |
109  *                            +-------------------------+
110  *                            |           $s3           |
111  *                            +-------------------------+
112  *                            |           $s4           |
113  *                            +-------------------------+
114  *                            |           $s5           |
115  *                            +-------------------------+
116  *                            |           tcc           |
117  *                            +-------------------------+
118  *                            |           tcc_ptr       |
119  *                            +-------------------------+ <--BPF_REG_FP
120  *                            |  prog->aux->stack_depth |
121  *                            |        (optional)       |
122  * current $sp -------------> +-------------------------+
123  *                                        low
124  */
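/*
 * Example: stack_depth == 24 gives bpf_stack_adjust == 32; the fixed part is
 * 10 * 8 == 80 bytes (already 16-byte aligned), so stack_adjust == 112. The
 * BPF stack then occupies SP + 0 .. SP + 31, BPF_REG_FP is set to SP + 32
 * (the program only uses negative offsets from it), and the tcc_ptr slot is
 * stored at that same address, i.e. BPF_TAIL_CALL_CNT_PTR_STACK_OFF(112) == 32.
 */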
125 static void build_prologue(struct jit_ctx *ctx)
126 {
127 	int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
128 	const struct bpf_prog *prog = ctx->prog;
129 	const bool is_main_prog = !bpf_is_subprog(prog);
130 
131 	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
132 
133 	/* To store ra, fp, s0, s1, s2, s3, s4, s5 */
134 	stack_adjust += sizeof(long) * 8;
135 
136 	/* To store tcc and tcc_ptr */
137 	stack_adjust += sizeof(long) * 2;
138 
139 	stack_adjust = round_up(stack_adjust, 16);
140 	stack_adjust += bpf_stack_adjust;
141 
142 	/* Reserve space for the move_imm + jirl instructions */
143 	for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
144 		emit_insn(ctx, nop);
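	/*
	 * These NOPs are the patch site rewritten into a long jump by
	 * bpf_arch_text_poke() when a trampoline is attached to this program.
	 */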
145 
146 	/*
147 	 * First instruction initializes the tail call count (TCC)
148 	 * register to zero. On tail call we skip this instruction,
149 	 * and the TCC is passed in REG_TCC from the caller.
150 	 */
151 	if (is_main_prog)
152 		emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, 0);
153 
154 	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);
155 
156 	store_offset = stack_adjust - sizeof(long);
157 	emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);
158 
159 	store_offset -= sizeof(long);
160 	emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);
161 
162 	store_offset -= sizeof(long);
163 	emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);
164 
165 	store_offset -= sizeof(long);
166 	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);
167 
168 	store_offset -= sizeof(long);
169 	emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);
170 
171 	store_offset -= sizeof(long);
172 	emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);
173 
174 	store_offset -= sizeof(long);
175 	emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);
176 
177 	store_offset -= sizeof(long);
178 	emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);
179 
180 	prepare_bpf_tail_call_cnt(ctx, &store_offset);
181 
182 	emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);
183 
184 	if (bpf_stack_adjust)
185 		emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);
186 
187 	ctx->stack_size = stack_adjust;
188 }
189 
190 static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
191 {
192 	int stack_adjust = ctx->stack_size;
193 	int load_offset;
194 
195 	load_offset = stack_adjust - sizeof(long);
196 	emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);
197 
198 	load_offset -= sizeof(long);
199 	emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);
200 
201 	load_offset -= sizeof(long);
202 	emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);
203 
204 	load_offset -= sizeof(long);
205 	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);
206 
207 	load_offset -= sizeof(long);
208 	emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);
209 
210 	load_offset -= sizeof(long);
211 	emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);
212 
213 	load_offset -= sizeof(long);
214 	emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);
215 
216 	load_offset -= sizeof(long);
217 	emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);
218 
219 	/*
220 	 * When pushing onto the stack, the tcc is stored first, then the tcc_ptr.
221 	 * When popping from the stack, the tcc_ptr is popped first, followed by the tcc.
222 	 */
223 	load_offset -= 2 * sizeof(long);
224 	emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset);
225 
226 	load_offset += sizeof(long);
227 	emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset);
228 
229 	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);
230 
231 	if (!is_tail_call) {
232 		/* Set return value */
233 		emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
234 		/* Return to the caller */
235 		emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
236 	} else {
237 		/*
238 		 * Call the next bpf prog, skipping the reserved NOPs and the
239 		 * TCC initialization instruction at its entry.
240 		 */
241 		emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 6);
242 	}
243 }
244 
245 static void build_epilogue(struct jit_ctx *ctx)
246 {
247 	__build_epilogue(ctx, false);
248 }
249 
250 bool bpf_jit_supports_kfunc_call(void)
251 {
252 	return true;
253 }
254 
255 bool bpf_jit_supports_far_kfunc_call(void)
256 {
257 	return true;
258 }
259 
260 static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn)
261 {
262 	int off, tc_ninsn = 0;
263 	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size);
264 	u8 a1 = LOONGARCH_GPR_A1;
265 	u8 a2 = LOONGARCH_GPR_A2;
266 	u8 t1 = LOONGARCH_GPR_T1;
267 	u8 t2 = LOONGARCH_GPR_T2;
268 	u8 t3 = LOONGARCH_GPR_T3;
269 	const int idx0 = ctx->idx;
270 
271 #define cur_offset (ctx->idx - idx0)
272 #define jmp_offset (tc_ninsn - (cur_offset))
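	/*
	 * tc_ninsn is the number of JITed instructions allotted to this BPF
	 * instruction (taken from ctx->offset[]), so jmp_offset always targets
	 * the first instruction after the emitted tail-call sequence, i.e. the
	 * fall-through "out" path.
	 */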
273 
274 	/*
275 	 * a0: &ctx
276 	 * a1: &array
277 	 * a2: index
278 	 *
279 	 * if (index >= array->map.max_entries)
280 	 *	 goto out;
281 	 */
282 	tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0];
283 	off = offsetof(struct bpf_array, map.max_entries);
284 	emit_insn(ctx, ldwu, t1, a1, off);
285 	/* bgeu $a2, $t1, jmp_offset */
286 	if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
287 		goto toofar;
288 
289 	/*
290 	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
291 	 *      goto out;
292 	 */
293 	emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off);
294 	emit_insn(ctx, ldd, t3, REG_TCC, 0);
295 	emit_insn(ctx, addid, t3, t3, 1);
296 	emit_insn(ctx, std, t3, REG_TCC, 0);
297 	emit_insn(ctx, addid, t2, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
298 	if (emit_tailcall_jmp(ctx, BPF_JSGT, t3, t2, jmp_offset) < 0)
299 		goto toofar;
300 
301 	/*
302 	 * prog = array->ptrs[index];
303 	 * if (!prog)
304 	 *	 goto out;
305 	 */
306 	emit_insn(ctx, alsld, t2, a2, a1, 2);
307 	off = offsetof(struct bpf_array, ptrs);
308 	emit_insn(ctx, ldd, t2, t2, off);
309 	/* beq $t2, $zero, jmp_offset */
310 	if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
311 		goto toofar;
312 
313 	/* goto *(prog->bpf_func + 4); */
314 	off = offsetof(struct bpf_prog, bpf_func);
315 	emit_insn(ctx, ldd, t3, t2, off);
316 	__build_epilogue(ctx, true);
317 
318 	return 0;
319 
320 toofar:
321 	pr_info_once("tail_call: jump too far\n");
322 	return -1;
323 #undef cur_offset
324 #undef jmp_offset
325 }
326 
327 static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
328 {
329 	const u8 t1 = LOONGARCH_GPR_T1;
330 	const u8 t2 = LOONGARCH_GPR_T2;
331 	const u8 t3 = LOONGARCH_GPR_T3;
332 	const u8 r0 = regmap[BPF_REG_0];
333 	const u8 src = regmap[insn->src_reg];
334 	const u8 dst = regmap[insn->dst_reg];
335 	const s16 off = insn->off;
336 	const s32 imm = insn->imm;
337 	const bool isdw = BPF_SIZE(insn->code) == BPF_DW;
338 
339 	move_imm(ctx, t1, off, false);
340 	emit_insn(ctx, addd, t1, dst, t1);
341 	move_reg(ctx, t3, src);
342 
343 	switch (imm) {
344 	/* lock *(size *)(dst + off) <op>= src */
345 	case BPF_ADD:
346 		if (isdw)
347 			emit_insn(ctx, amaddd, t2, t1, src);
348 		else
349 			emit_insn(ctx, amaddw, t2, t1, src);
350 		break;
351 	case BPF_AND:
352 		if (isdw)
353 			emit_insn(ctx, amandd, t2, t1, src);
354 		else
355 			emit_insn(ctx, amandw, t2, t1, src);
356 		break;
357 	case BPF_OR:
358 		if (isdw)
359 			emit_insn(ctx, amord, t2, t1, src);
360 		else
361 			emit_insn(ctx, amorw, t2, t1, src);
362 		break;
363 	case BPF_XOR:
364 		if (isdw)
365 			emit_insn(ctx, amxord, t2, t1, src);
366 		else
367 			emit_insn(ctx, amxorw, t2, t1, src);
368 		break;
369 	/* src = atomic_fetch_<op>(dst + off, src) */
370 	case BPF_ADD | BPF_FETCH:
371 		if (isdw) {
372 			emit_insn(ctx, amaddd, src, t1, t3);
373 		} else {
374 			emit_insn(ctx, amaddw, src, t1, t3);
375 			emit_zext_32(ctx, src, true);
376 		}
377 		break;
378 	case BPF_AND | BPF_FETCH:
379 		if (isdw) {
380 			emit_insn(ctx, amandd, src, t1, t3);
381 		} else {
382 			emit_insn(ctx, amandw, src, t1, t3);
383 			emit_zext_32(ctx, src, true);
384 		}
385 		break;
386 	case BPF_OR | BPF_FETCH:
387 		if (isdw) {
388 			emit_insn(ctx, amord, src, t1, t3);
389 		} else {
390 			emit_insn(ctx, amorw, src, t1, t3);
391 			emit_zext_32(ctx, src, true);
392 		}
393 		break;
394 	case BPF_XOR | BPF_FETCH:
395 		if (isdw) {
396 			emit_insn(ctx, amxord, src, t1, t3);
397 		} else {
398 			emit_insn(ctx, amxorw, src, t1, t3);
399 			emit_zext_32(ctx, src, true);
400 		}
401 		break;
402 	/* src = atomic_xchg(dst + off, src); */
403 	case BPF_XCHG:
404 		if (isdw) {
405 			emit_insn(ctx, amswapd, src, t1, t3);
406 		} else {
407 			emit_insn(ctx, amswapw, src, t1, t3);
408 			emit_zext_32(ctx, src, true);
409 		}
410 		break;
411 	/* r0 = atomic_cmpxchg(dst + off, r0, src); */
412 	case BPF_CMPXCHG:
413 		move_reg(ctx, t2, r0);
414 		if (isdw) {
415 			emit_insn(ctx, lld, r0, t1, 0);
416 			emit_insn(ctx, bne, t2, r0, 4);
417 			move_reg(ctx, t3, src);
418 			emit_insn(ctx, scd, t3, t1, 0);
419 			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
420 		} else {
421 			emit_insn(ctx, llw, r0, t1, 0);
422 			emit_zext_32(ctx, t2, true);
423 			emit_zext_32(ctx, r0, true);
424 			emit_insn(ctx, bne, t2, r0, 4);
425 			move_reg(ctx, t3, src);
426 			emit_insn(ctx, scw, t3, t1, 0);
427 			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
428 			emit_zext_32(ctx, r0, true);
429 		}
430 		break;
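		/*
		 * The ll/sc pair above retries until the store-conditional
		 * succeeds: on failure the trailing beq branches back to the ll.
		 */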
431 	}
432 }
433 
434 static bool is_signed_bpf_cond(u8 cond)
435 {
436 	return cond == BPF_JSGT || cond == BPF_JSLT ||
437 	       cond == BPF_JSGE || cond == BPF_JSLE;
438 }
439 
440 #define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
441 #define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
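/*
 * ex->fixup packs the destination register into bits 31:27 and, in bits 26:0,
 * the distance from the instruction following the faulting load to &ex->fixup.
 * ex_handler_bpf() zeroes the register and resumes execution right after the
 * faulting load.
 */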
442 
443 bool ex_handler_bpf(const struct exception_table_entry *ex,
444 		    struct pt_regs *regs)
445 {
446 	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
447 	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
448 
449 	regs->regs[dst_reg] = 0;
450 	regs->csr_era = (unsigned long)&ex->fixup - offset;
451 
452 	return true;
453 }
454 
455 /* For accesses to BTF pointers, add an entry to the exception table */
456 static int add_exception_handler(const struct bpf_insn *insn,
457 				 struct jit_ctx *ctx,
458 				 int dst_reg)
459 {
460 	unsigned long pc;
461 	off_t offset;
462 	struct exception_table_entry *ex;
463 
464 	if (!ctx->image || !ctx->prog->aux->extable)
465 		return 0;
466 
467 	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
468 	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
469 		return 0;
470 
471 	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
472 		return -EINVAL;
473 
474 	ex = &ctx->prog->aux->extable[ctx->num_exentries];
475 	pc = (unsigned long)&ctx->image[ctx->idx - 1];
476 
477 	offset = pc - (long)&ex->insn;
478 	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
479 		return -ERANGE;
480 
481 	ex->insn = offset;
482 
483 	/*
484 	 * Since the extable follows the program, the fixup offset is always
485 	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
486 	 * to keep things simple, and put the destination register in the upper
487 	 * bits. We don't need to worry about buildtime or runtime sort
488 	 * modifying the upper bits because the table is already sorted, and
489 	 * isn't part of the main exception table.
490 	 */
491 	offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
492 	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
493 		return -ERANGE;
494 
495 	ex->type = EX_TYPE_BPF;
496 	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
497 
498 	ctx->num_exentries++;
499 
500 	return 0;
501 }
502 
503 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
504 {
505 	u8 tm = -1;
506 	u64 func_addr;
507 	bool func_addr_fixed, sign_extend;
508 	int i = insn - ctx->prog->insnsi;
509 	int ret, jmp_offset, tcc_ptr_off;
510 	const u8 code = insn->code;
511 	const u8 cond = BPF_OP(code);
512 	const u8 t1 = LOONGARCH_GPR_T1;
513 	const u8 t2 = LOONGARCH_GPR_T2;
514 	const u8 src = regmap[insn->src_reg];
515 	const u8 dst = regmap[insn->dst_reg];
516 	const s16 off = insn->off;
517 	const s32 imm = insn->imm;
518 	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
519 
520 	switch (code) {
521 	/* dst = src */
522 	case BPF_ALU | BPF_MOV | BPF_X:
523 	case BPF_ALU64 | BPF_MOV | BPF_X:
524 		switch (off) {
525 		case 0:
526 			move_reg(ctx, dst, src);
527 			emit_zext_32(ctx, dst, is32);
528 			break;
529 		case 8:
530 			emit_insn(ctx, extwb, dst, src);
531 			emit_zext_32(ctx, dst, is32);
532 			break;
533 		case 16:
534 			emit_insn(ctx, extwh, dst, src);
535 			emit_zext_32(ctx, dst, is32);
536 			break;
537 		case 32:
538 			emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
539 			break;
540 		}
541 		break;
542 
543 	/* dst = imm */
544 	case BPF_ALU | BPF_MOV | BPF_K:
545 	case BPF_ALU64 | BPF_MOV | BPF_K:
546 		move_imm(ctx, dst, imm, is32);
547 		break;
548 
549 	/* dst = dst + src */
550 	case BPF_ALU | BPF_ADD | BPF_X:
551 	case BPF_ALU64 | BPF_ADD | BPF_X:
552 		emit_insn(ctx, addd, dst, dst, src);
553 		emit_zext_32(ctx, dst, is32);
554 		break;
555 
556 	/* dst = dst + imm */
557 	case BPF_ALU | BPF_ADD | BPF_K:
558 	case BPF_ALU64 | BPF_ADD | BPF_K:
559 		if (is_signed_imm12(imm)) {
560 			emit_insn(ctx, addid, dst, dst, imm);
561 		} else {
562 			move_imm(ctx, t1, imm, is32);
563 			emit_insn(ctx, addd, dst, dst, t1);
564 		}
565 		emit_zext_32(ctx, dst, is32);
566 		break;
567 
568 	/* dst = dst - src */
569 	case BPF_ALU | BPF_SUB | BPF_X:
570 	case BPF_ALU64 | BPF_SUB | BPF_X:
571 		emit_insn(ctx, subd, dst, dst, src);
572 		emit_zext_32(ctx, dst, is32);
573 		break;
574 
575 	/* dst = dst - imm */
576 	case BPF_ALU | BPF_SUB | BPF_K:
577 	case BPF_ALU64 | BPF_SUB | BPF_K:
578 		if (is_signed_imm12(-imm)) {
579 			emit_insn(ctx, addid, dst, dst, -imm);
580 		} else {
581 			move_imm(ctx, t1, imm, is32);
582 			emit_insn(ctx, subd, dst, dst, t1);
583 		}
584 		emit_zext_32(ctx, dst, is32);
585 		break;
586 
587 	/* dst = dst * src */
588 	case BPF_ALU | BPF_MUL | BPF_X:
589 	case BPF_ALU64 | BPF_MUL | BPF_X:
590 		emit_insn(ctx, muld, dst, dst, src);
591 		emit_zext_32(ctx, dst, is32);
592 		break;
593 
594 	/* dst = dst * imm */
595 	case BPF_ALU | BPF_MUL | BPF_K:
596 	case BPF_ALU64 | BPF_MUL | BPF_K:
597 		move_imm(ctx, t1, imm, is32);
598 		emit_insn(ctx, muld, dst, dst, t1);
599 		emit_zext_32(ctx, dst, is32);
600 		break;
601 
602 	/* dst = dst / src */
603 	case BPF_ALU | BPF_DIV | BPF_X:
604 	case BPF_ALU64 | BPF_DIV | BPF_X:
605 		if (!off) {
606 			emit_zext_32(ctx, dst, is32);
607 			move_reg(ctx, t1, src);
608 			emit_zext_32(ctx, t1, is32);
609 			emit_insn(ctx, divdu, dst, dst, t1);
610 			emit_zext_32(ctx, dst, is32);
611 		} else {
612 			emit_sext_32(ctx, dst, is32);
613 			move_reg(ctx, t1, src);
614 			emit_sext_32(ctx, t1, is32);
615 			emit_insn(ctx, divd, dst, dst, t1);
616 			emit_sext_32(ctx, dst, is32);
617 		}
618 		break;
619 
620 	/* dst = dst / imm */
621 	case BPF_ALU | BPF_DIV | BPF_K:
622 	case BPF_ALU64 | BPF_DIV | BPF_K:
623 		if (!off) {
624 			move_imm(ctx, t1, imm, is32);
625 			emit_zext_32(ctx, dst, is32);
626 			emit_insn(ctx, divdu, dst, dst, t1);
627 			emit_zext_32(ctx, dst, is32);
628 		} else {
629 			move_imm(ctx, t1, imm, false);
630 			emit_sext_32(ctx, t1, is32);
631 			emit_sext_32(ctx, dst, is32);
632 			emit_insn(ctx, divd, dst, dst, t1);
633 			emit_sext_32(ctx, dst, is32);
634 		}
635 		break;
636 
637 	/* dst = dst % src */
638 	case BPF_ALU | BPF_MOD | BPF_X:
639 	case BPF_ALU64 | BPF_MOD | BPF_X:
640 		if (!off) {
641 			emit_zext_32(ctx, dst, is32);
642 			move_reg(ctx, t1, src);
643 			emit_zext_32(ctx, t1, is32);
644 			emit_insn(ctx, moddu, dst, dst, t1);
645 			emit_zext_32(ctx, dst, is32);
646 		} else {
647 			emit_sext_32(ctx, dst, is32);
648 			move_reg(ctx, t1, src);
649 			emit_sext_32(ctx, t1, is32);
650 			emit_insn(ctx, modd, dst, dst, t1);
651 			emit_sext_32(ctx, dst, is32);
652 		}
653 		break;
654 
655 	/* dst = dst % imm */
656 	case BPF_ALU | BPF_MOD | BPF_K:
657 	case BPF_ALU64 | BPF_MOD | BPF_K:
658 		if (!off) {
659 			move_imm(ctx, t1, imm, is32);
660 			emit_zext_32(ctx, dst, is32);
661 			emit_insn(ctx, moddu, dst, dst, t1);
662 			emit_zext_32(ctx, dst, is32);
663 		} else {
664 			move_imm(ctx, t1, imm, false);
665 			emit_sext_32(ctx, t1, is32);
666 			emit_sext_32(ctx, dst, is32);
667 			emit_insn(ctx, modd, dst, dst, t1);
668 			emit_sext_32(ctx, dst, is32);
669 		}
670 		break;
671 
672 	/* dst = -dst */
673 	case BPF_ALU | BPF_NEG:
674 	case BPF_ALU64 | BPF_NEG:
675 		move_imm(ctx, t1, imm, is32);
676 		emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
677 		emit_zext_32(ctx, dst, is32);
678 		break;
679 
680 	/* dst = dst & src */
681 	case BPF_ALU | BPF_AND | BPF_X:
682 	case BPF_ALU64 | BPF_AND | BPF_X:
683 		emit_insn(ctx, and, dst, dst, src);
684 		emit_zext_32(ctx, dst, is32);
685 		break;
686 
687 	/* dst = dst & imm */
688 	case BPF_ALU | BPF_AND | BPF_K:
689 	case BPF_ALU64 | BPF_AND | BPF_K:
690 		if (is_unsigned_imm12(imm)) {
691 			emit_insn(ctx, andi, dst, dst, imm);
692 		} else {
693 			move_imm(ctx, t1, imm, is32);
694 			emit_insn(ctx, and, dst, dst, t1);
695 		}
696 		emit_zext_32(ctx, dst, is32);
697 		break;
698 
699 	/* dst = dst | src */
700 	case BPF_ALU | BPF_OR | BPF_X:
701 	case BPF_ALU64 | BPF_OR | BPF_X:
702 		emit_insn(ctx, or, dst, dst, src);
703 		emit_zext_32(ctx, dst, is32);
704 		break;
705 
706 	/* dst = dst | imm */
707 	case BPF_ALU | BPF_OR | BPF_K:
708 	case BPF_ALU64 | BPF_OR | BPF_K:
709 		if (is_unsigned_imm12(imm)) {
710 			emit_insn(ctx, ori, dst, dst, imm);
711 		} else {
712 			move_imm(ctx, t1, imm, is32);
713 			emit_insn(ctx, or, dst, dst, t1);
714 		}
715 		emit_zext_32(ctx, dst, is32);
716 		break;
717 
718 	/* dst = dst ^ src */
719 	case BPF_ALU | BPF_XOR | BPF_X:
720 	case BPF_ALU64 | BPF_XOR | BPF_X:
721 		emit_insn(ctx, xor, dst, dst, src);
722 		emit_zext_32(ctx, dst, is32);
723 		break;
724 
725 	/* dst = dst ^ imm */
726 	case BPF_ALU | BPF_XOR | BPF_K:
727 	case BPF_ALU64 | BPF_XOR | BPF_K:
728 		if (is_unsigned_imm12(imm)) {
729 			emit_insn(ctx, xori, dst, dst, imm);
730 		} else {
731 			move_imm(ctx, t1, imm, is32);
732 			emit_insn(ctx, xor, dst, dst, t1);
733 		}
734 		emit_zext_32(ctx, dst, is32);
735 		break;
736 
737 	/* dst = dst << src (logical) */
738 	case BPF_ALU | BPF_LSH | BPF_X:
739 		emit_insn(ctx, sllw, dst, dst, src);
740 		emit_zext_32(ctx, dst, is32);
741 		break;
742 
743 	case BPF_ALU64 | BPF_LSH | BPF_X:
744 		emit_insn(ctx, slld, dst, dst, src);
745 		break;
746 
747 	/* dst = dst << imm (logical) */
748 	case BPF_ALU | BPF_LSH | BPF_K:
749 		emit_insn(ctx, slliw, dst, dst, imm);
750 		emit_zext_32(ctx, dst, is32);
751 		break;
752 
753 	case BPF_ALU64 | BPF_LSH | BPF_K:
754 		emit_insn(ctx, sllid, dst, dst, imm);
755 		break;
756 
757 	/* dst = dst >> src (logical) */
758 	case BPF_ALU | BPF_RSH | BPF_X:
759 		emit_insn(ctx, srlw, dst, dst, src);
760 		emit_zext_32(ctx, dst, is32);
761 		break;
762 
763 	case BPF_ALU64 | BPF_RSH | BPF_X:
764 		emit_insn(ctx, srld, dst, dst, src);
765 		break;
766 
767 	/* dst = dst >> imm (logical) */
768 	case BPF_ALU | BPF_RSH | BPF_K:
769 		emit_insn(ctx, srliw, dst, dst, imm);
770 		emit_zext_32(ctx, dst, is32);
771 		break;
772 
773 	case BPF_ALU64 | BPF_RSH | BPF_K:
774 		emit_insn(ctx, srlid, dst, dst, imm);
775 		break;
776 
777 	/* dst = dst >> src (arithmetic) */
778 	case BPF_ALU | BPF_ARSH | BPF_X:
779 		emit_insn(ctx, sraw, dst, dst, src);
780 		emit_zext_32(ctx, dst, is32);
781 		break;
782 
783 	case BPF_ALU64 | BPF_ARSH | BPF_X:
784 		emit_insn(ctx, srad, dst, dst, src);
785 		break;
786 
787 	/* dst = dst >> imm (arithmetic) */
788 	case BPF_ALU | BPF_ARSH | BPF_K:
789 		emit_insn(ctx, sraiw, dst, dst, imm);
790 		emit_zext_32(ctx, dst, is32);
791 		break;
792 
793 	case BPF_ALU64 | BPF_ARSH | BPF_K:
794 		emit_insn(ctx, sraid, dst, dst, imm);
795 		break;
796 
797 	/* dst = BSWAP##imm(dst) */
798 	case BPF_ALU | BPF_END | BPF_FROM_LE:
799 		switch (imm) {
800 		case 16:
801 			/* zero-extend 16 bits into 64 bits */
802 			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
803 			break;
804 		case 32:
805 			/* zero-extend 32 bits into 64 bits */
806 			emit_zext_32(ctx, dst, is32);
807 			break;
808 		case 64:
809 			/* do nothing */
810 			break;
811 		}
812 		break;
813 
814 	case BPF_ALU | BPF_END | BPF_FROM_BE:
815 	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
816 		switch (imm) {
817 		case 16:
818 			emit_insn(ctx, revb2h, dst, dst);
819 			/* zero-extend 16 bits into 64 bits */
820 			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
821 			break;
822 		case 32:
823 			emit_insn(ctx, revb2w, dst, dst);
824 			/* clear the upper 32 bits */
825 			emit_zext_32(ctx, dst, true);
826 			break;
827 		case 64:
828 			emit_insn(ctx, revbd, dst, dst);
829 			break;
830 		}
831 		break;
832 
833 	/* PC += off if dst cond src */
834 	case BPF_JMP | BPF_JEQ | BPF_X:
835 	case BPF_JMP | BPF_JNE | BPF_X:
836 	case BPF_JMP | BPF_JGT | BPF_X:
837 	case BPF_JMP | BPF_JGE | BPF_X:
838 	case BPF_JMP | BPF_JLT | BPF_X:
839 	case BPF_JMP | BPF_JLE | BPF_X:
840 	case BPF_JMP | BPF_JSGT | BPF_X:
841 	case BPF_JMP | BPF_JSGE | BPF_X:
842 	case BPF_JMP | BPF_JSLT | BPF_X:
843 	case BPF_JMP | BPF_JSLE | BPF_X:
844 	case BPF_JMP32 | BPF_JEQ | BPF_X:
845 	case BPF_JMP32 | BPF_JNE | BPF_X:
846 	case BPF_JMP32 | BPF_JGT | BPF_X:
847 	case BPF_JMP32 | BPF_JGE | BPF_X:
848 	case BPF_JMP32 | BPF_JLT | BPF_X:
849 	case BPF_JMP32 | BPF_JLE | BPF_X:
850 	case BPF_JMP32 | BPF_JSGT | BPF_X:
851 	case BPF_JMP32 | BPF_JSGE | BPF_X:
852 	case BPF_JMP32 | BPF_JSLT | BPF_X:
853 	case BPF_JMP32 | BPF_JSLE | BPF_X:
854 		jmp_offset = bpf2la_offset(i, off, ctx);
855 		move_reg(ctx, t1, dst);
856 		move_reg(ctx, t2, src);
857 		if (is_signed_bpf_cond(BPF_OP(code))) {
858 			emit_sext_32(ctx, t1, is32);
859 			emit_sext_32(ctx, t2, is32);
860 		} else {
861 			emit_zext_32(ctx, t1, is32);
862 			emit_zext_32(ctx, t2, is32);
863 		}
864 		if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
865 			goto toofar;
866 		break;
867 
868 	/* PC += off if dst cond imm */
869 	case BPF_JMP | BPF_JEQ | BPF_K:
870 	case BPF_JMP | BPF_JNE | BPF_K:
871 	case BPF_JMP | BPF_JGT | BPF_K:
872 	case BPF_JMP | BPF_JGE | BPF_K:
873 	case BPF_JMP | BPF_JLT | BPF_K:
874 	case BPF_JMP | BPF_JLE | BPF_K:
875 	case BPF_JMP | BPF_JSGT | BPF_K:
876 	case BPF_JMP | BPF_JSGE | BPF_K:
877 	case BPF_JMP | BPF_JSLT | BPF_K:
878 	case BPF_JMP | BPF_JSLE | BPF_K:
879 	case BPF_JMP32 | BPF_JEQ | BPF_K:
880 	case BPF_JMP32 | BPF_JNE | BPF_K:
881 	case BPF_JMP32 | BPF_JGT | BPF_K:
882 	case BPF_JMP32 | BPF_JGE | BPF_K:
883 	case BPF_JMP32 | BPF_JLT | BPF_K:
884 	case BPF_JMP32 | BPF_JLE | BPF_K:
885 	case BPF_JMP32 | BPF_JSGT | BPF_K:
886 	case BPF_JMP32 | BPF_JSGE | BPF_K:
887 	case BPF_JMP32 | BPF_JSLT | BPF_K:
888 	case BPF_JMP32 | BPF_JSLE | BPF_K:
889 		jmp_offset = bpf2la_offset(i, off, ctx);
890 		if (imm) {
891 			move_imm(ctx, t1, imm, false);
892 			tm = t1;
893 		} else {
894 			/* If imm is 0, simply use zero register. */
895 			tm = LOONGARCH_GPR_ZERO;
896 		}
897 		move_reg(ctx, t2, dst);
898 		if (is_signed_bpf_cond(BPF_OP(code))) {
899 			emit_sext_32(ctx, tm, is32);
900 			emit_sext_32(ctx, t2, is32);
901 		} else {
902 			emit_zext_32(ctx, tm, is32);
903 			emit_zext_32(ctx, t2, is32);
904 		}
905 		if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
906 			goto toofar;
907 		break;
908 
909 	/* PC += off if dst & src */
910 	case BPF_JMP | BPF_JSET | BPF_X:
911 	case BPF_JMP32 | BPF_JSET | BPF_X:
912 		jmp_offset = bpf2la_offset(i, off, ctx);
913 		emit_insn(ctx, and, t1, dst, src);
914 		emit_zext_32(ctx, t1, is32);
915 		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
916 			goto toofar;
917 		break;
918 
919 	/* PC += off if dst & imm */
920 	case BPF_JMP | BPF_JSET | BPF_K:
921 	case BPF_JMP32 | BPF_JSET | BPF_K:
922 		jmp_offset = bpf2la_offset(i, off, ctx);
923 		move_imm(ctx, t1, imm, is32);
924 		emit_insn(ctx, and, t1, dst, t1);
925 		emit_zext_32(ctx, t1, is32);
926 		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
927 			goto toofar;
928 		break;
929 
930 	/* PC += off */
931 	case BPF_JMP | BPF_JA:
932 	case BPF_JMP32 | BPF_JA:
933 		if (BPF_CLASS(code) == BPF_JMP)
934 			jmp_offset = bpf2la_offset(i, off, ctx);
935 		else
936 			jmp_offset = bpf2la_offset(i, imm, ctx);
937 		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
938 			goto toofar;
939 		break;
940 
941 	/* function call */
942 	case BPF_JMP | BPF_CALL:
943 		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
944 					    &func_addr, &func_addr_fixed);
945 		if (ret < 0)
946 			return ret;
947 
948 		if (insn->src_reg == BPF_PSEUDO_CALL) {
949 			tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size);
950 			emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off);
951 		}
952 
953 		move_addr(ctx, t1, func_addr);
954 		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
955 
956 		if (insn->src_reg != BPF_PSEUDO_CALL)
957 			move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
958 
959 		break;
960 
961 	/* tail call */
962 	case BPF_JMP | BPF_TAIL_CALL:
963 		if (emit_bpf_tail_call(ctx, i) < 0)
964 			return -EINVAL;
965 		break;
966 
967 	/* function return */
968 	case BPF_JMP | BPF_EXIT:
969 		if (i == ctx->prog->len - 1)
970 			break;
971 
972 		jmp_offset = epilogue_offset(ctx);
973 		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
974 			goto toofar;
975 		break;
976 
977 	/* dst = imm64 */
978 	case BPF_LD | BPF_IMM | BPF_DW:
979 	{
980 		const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
981 
982 		if (bpf_pseudo_func(insn))
983 			move_addr(ctx, dst, imm64);
984 		else
985 			move_imm(ctx, dst, imm64, is32);
986 		return 1;
987 	}
988 
989 	/* dst = *(size *)(src + off) */
990 	case BPF_LDX | BPF_MEM | BPF_B:
991 	case BPF_LDX | BPF_MEM | BPF_H:
992 	case BPF_LDX | BPF_MEM | BPF_W:
993 	case BPF_LDX | BPF_MEM | BPF_DW:
994 	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
995 	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
996 	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
997 	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
998 	/* dst_reg = (s64)*(signed size *)(src_reg + off) */
999 	case BPF_LDX | BPF_MEMSX | BPF_B:
1000 	case BPF_LDX | BPF_MEMSX | BPF_H:
1001 	case BPF_LDX | BPF_MEMSX | BPF_W:
1002 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1003 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1004 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1005 		sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
1006 			      BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
1007 		switch (BPF_SIZE(code)) {
1008 		case BPF_B:
1009 			if (is_signed_imm12(off)) {
1010 				if (sign_extend)
1011 					emit_insn(ctx, ldb, dst, src, off);
1012 				else
1013 					emit_insn(ctx, ldbu, dst, src, off);
1014 			} else {
1015 				move_imm(ctx, t1, off, is32);
1016 				if (sign_extend)
1017 					emit_insn(ctx, ldxb, dst, src, t1);
1018 				else
1019 					emit_insn(ctx, ldxbu, dst, src, t1);
1020 			}
1021 			break;
1022 		case BPF_H:
1023 			if (is_signed_imm12(off)) {
1024 				if (sign_extend)
1025 					emit_insn(ctx, ldh, dst, src, off);
1026 				else
1027 					emit_insn(ctx, ldhu, dst, src, off);
1028 			} else {
1029 				move_imm(ctx, t1, off, is32);
1030 				if (sign_extend)
1031 					emit_insn(ctx, ldxh, dst, src, t1);
1032 				else
1033 					emit_insn(ctx, ldxhu, dst, src, t1);
1034 			}
1035 			break;
1036 		case BPF_W:
1037 			if (is_signed_imm12(off)) {
1038 				if (sign_extend)
1039 					emit_insn(ctx, ldw, dst, src, off);
1040 				else
1041 					emit_insn(ctx, ldwu, dst, src, off);
1042 			} else {
1043 				move_imm(ctx, t1, off, is32);
1044 				if (sign_extend)
1045 					emit_insn(ctx, ldxw, dst, src, t1);
1046 				else
1047 					emit_insn(ctx, ldxwu, dst, src, t1);
1048 			}
1049 			break;
1050 		case BPF_DW:
1051 			move_imm(ctx, t1, off, is32);
1052 			emit_insn(ctx, ldxd, dst, src, t1);
1053 			break;
1054 		}
1055 
1056 		ret = add_exception_handler(insn, ctx, dst);
1057 		if (ret)
1058 			return ret;
1059 		break;
1060 
1061 	/* *(size *)(dst + off) = imm */
1062 	case BPF_ST | BPF_MEM | BPF_B:
1063 	case BPF_ST | BPF_MEM | BPF_H:
1064 	case BPF_ST | BPF_MEM | BPF_W:
1065 	case BPF_ST | BPF_MEM | BPF_DW:
1066 		switch (BPF_SIZE(code)) {
1067 		case BPF_B:
1068 			move_imm(ctx, t1, imm, is32);
1069 			if (is_signed_imm12(off)) {
1070 				emit_insn(ctx, stb, t1, dst, off);
1071 			} else {
1072 				move_imm(ctx, t2, off, is32);
1073 				emit_insn(ctx, stxb, t1, dst, t2);
1074 			}
1075 			break;
1076 		case BPF_H:
1077 			move_imm(ctx, t1, imm, is32);
1078 			if (is_signed_imm12(off)) {
1079 				emit_insn(ctx, sth, t1, dst, off);
1080 			} else {
1081 				move_imm(ctx, t2, off, is32);
1082 				emit_insn(ctx, stxh, t1, dst, t2);
1083 			}
1084 			break;
1085 		case BPF_W:
1086 			move_imm(ctx, t1, imm, is32);
1087 			if (is_signed_imm12(off)) {
1088 				emit_insn(ctx, stw, t1, dst, off);
1089 			} else if (is_signed_imm14(off)) {
1090 				emit_insn(ctx, stptrw, t1, dst, off);
1091 			} else {
1092 				move_imm(ctx, t2, off, is32);
1093 				emit_insn(ctx, stxw, t1, dst, t2);
1094 			}
1095 			break;
1096 		case BPF_DW:
1097 			move_imm(ctx, t1, imm, is32);
1098 			if (is_signed_imm12(off)) {
1099 				emit_insn(ctx, std, t1, dst, off);
1100 			} else if (is_signed_imm14(off)) {
1101 				emit_insn(ctx, stptrd, t1, dst, off);
1102 			} else {
1103 				move_imm(ctx, t2, off, is32);
1104 				emit_insn(ctx, stxd, t1, dst, t2);
1105 			}
1106 			break;
1107 		}
1108 		break;
1109 
1110 	/* *(size *)(dst + off) = src */
1111 	case BPF_STX | BPF_MEM | BPF_B:
1112 	case BPF_STX | BPF_MEM | BPF_H:
1113 	case BPF_STX | BPF_MEM | BPF_W:
1114 	case BPF_STX | BPF_MEM | BPF_DW:
1115 		switch (BPF_SIZE(code)) {
1116 		case BPF_B:
1117 			if (is_signed_imm12(off)) {
1118 				emit_insn(ctx, stb, src, dst, off);
1119 			} else {
1120 				move_imm(ctx, t1, off, is32);
1121 				emit_insn(ctx, stxb, src, dst, t1);
1122 			}
1123 			break;
1124 		case BPF_H:
1125 			if (is_signed_imm12(off)) {
1126 				emit_insn(ctx, sth, src, dst, off);
1127 			} else {
1128 				move_imm(ctx, t1, off, is32);
1129 				emit_insn(ctx, stxh, src, dst, t1);
1130 			}
1131 			break;
1132 		case BPF_W:
1133 			if (is_signed_imm12(off)) {
1134 				emit_insn(ctx, stw, src, dst, off);
1135 			} else if (is_signed_imm14(off)) {
1136 				emit_insn(ctx, stptrw, src, dst, off);
1137 			} else {
1138 				move_imm(ctx, t1, off, is32);
1139 				emit_insn(ctx, stxw, src, dst, t1);
1140 			}
1141 			break;
1142 		case BPF_DW:
1143 			if (is_signed_imm12(off)) {
1144 				emit_insn(ctx, std, src, dst, off);
1145 			} else if (is_signed_imm14(off)) {
1146 				emit_insn(ctx, stptrd, src, dst, off);
1147 			} else {
1148 				move_imm(ctx, t1, off, is32);
1149 				emit_insn(ctx, stxd, src, dst, t1);
1150 			}
1151 			break;
1152 		}
1153 		break;
1154 
1155 	case BPF_STX | BPF_ATOMIC | BPF_W:
1156 	case BPF_STX | BPF_ATOMIC | BPF_DW:
1157 		emit_atomic(insn, ctx);
1158 		break;
1159 
1160 	/* Speculation barrier */
1161 	case BPF_ST | BPF_NOSPEC:
1162 		break;
1163 
1164 	default:
1165 		pr_err("bpf_jit: unknown opcode %02x\n", code);
1166 		return -EINVAL;
1167 	}
1168 
1169 	return 0;
1170 
1171 toofar:
1172 	pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
1173 	return -E2BIG;
1174 }
1175 
1176 static int build_body(struct jit_ctx *ctx, bool extra_pass)
1177 {
1178 	int i;
1179 	const struct bpf_prog *prog = ctx->prog;
1180 
1181 	for (i = 0; i < prog->len; i++) {
1182 		const struct bpf_insn *insn = &prog->insnsi[i];
1183 		int ret;
1184 
1185 		if (ctx->image == NULL)
1186 			ctx->offset[i] = ctx->idx;
1187 
1188 		ret = build_insn(insn, ctx, extra_pass);
1189 		if (ret > 0) {
1190 			i++;
1191 			if (ctx->image == NULL)
1192 				ctx->offset[i] = ctx->idx;
1193 			continue;
1194 		}
1195 		if (ret)
1196 			return ret;
1197 	}
1198 
1199 	if (ctx->image == NULL)
1200 		ctx->offset[i] = ctx->idx;
1201 
1202 	return 0;
1203 }
1204 
1205 /* Fill space with break instructions */
1206 static void jit_fill_hole(void *area, unsigned int size)
1207 {
1208 	u32 *ptr;
1209 
1210 	/* We are guaranteed to have aligned memory */
1211 	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
1212 		*ptr++ = INSN_BREAK;
1213 }
1214 
1215 static int validate_code(struct jit_ctx *ctx)
1216 {
1217 	int i;
1218 	union loongarch_instruction insn;
1219 
1220 	for (i = 0; i < ctx->idx; i++) {
1221 		insn = ctx->image[i];
1222 		/* Check INSN_BREAK */
1223 		if (insn.word == INSN_BREAK)
1224 			return -1;
1225 	}
1226 
1227 	return 0;
1228 }
1229 
1230 static int validate_ctx(struct jit_ctx *ctx)
1231 {
1232 	if (validate_code(ctx))
1233 		return -1;
1234 
1235 	if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
1236 		return -1;
1237 
1238 	return 0;
1239 }
1240 
1241 static int emit_jump_and_link(struct jit_ctx *ctx, u8 rd, u64 target)
1242 {
1243 	if (!target) {
1244 		pr_err("bpf_jit: jump target address is invalid\n");
1245 		return -EFAULT;
1246 	}
1247 
1248 	move_imm(ctx, LOONGARCH_GPR_T1, target, false);
1249 	emit_insn(ctx, jirl, rd, LOONGARCH_GPR_T1, 0);
1250 
1251 	return 0;
1252 }
1253 
1254 static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
1255 {
1256 	int i;
1257 	struct jit_ctx ctx;
1258 
1259 	ctx.idx = 0;
1260 	ctx.image = (union loongarch_instruction *)insns;
1261 
1262 	if (!target) {
1263 		for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
1264 			emit_insn((&ctx), nop);
1265 		return 0;
1266 	}
1267 
1268 	return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
1269 }
1270 
1271 static int emit_call(struct jit_ctx *ctx, u64 addr)
1272 {
1273 	return emit_jump_and_link(ctx, LOONGARCH_GPR_RA, addr);
1274 }
1275 
1276 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
1277 {
1278 	int ret;
1279 
1280 	mutex_lock(&text_mutex);
1281 	ret = larch_insn_text_copy(dst, src, len);
1282 	mutex_unlock(&text_mutex);
1283 
1284 	return ret ? ERR_PTR(-EINVAL) : dst;
1285 }
1286 
1287 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
1288 		       enum bpf_text_poke_type new_t, void *old_addr,
1289 		       void *new_addr)
1290 {
1291 	int ret;
1292 	bool is_call;
1293 	u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
1294 	u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
1295 
1296 	/* Only poking bpf text is supported. Since kernel function entry
1297 	 * is set up by ftrace, we rely on ftrace to poke kernel functions.
1298 	 */
1299 	if (!is_bpf_text_address((unsigned long)ip))
1300 		return -ENOTSUPP;
1301 
1302 	is_call = old_t == BPF_MOD_CALL;
1303 	ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
1304 	if (ret)
1305 		return ret;
1306 
1307 	if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES))
1308 		return -EFAULT;
1309 
1310 	is_call = new_t == BPF_MOD_CALL;
1311 	ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call);
1312 	if (ret)
1313 		return ret;
1314 
1315 	mutex_lock(&text_mutex);
1316 	if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES))
1317 		ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES);
1318 	mutex_unlock(&text_mutex);
1319 
1320 	return ret;
1321 }
1322 
1323 int bpf_arch_text_invalidate(void *dst, size_t len)
1324 {
1325 	int i;
1326 	int ret = 0;
1327 	u32 *inst;
1328 
1329 	inst = kvmalloc(len, GFP_KERNEL);
1330 	if (!inst)
1331 		return -ENOMEM;
1332 
1333 	for (i = 0; i < (len / sizeof(u32)); i++)
1334 		inst[i] = INSN_BREAK;
1335 
1336 	mutex_lock(&text_mutex);
1337 	if (larch_insn_text_copy(dst, inst, len))
1338 		ret = -EINVAL;
1339 	mutex_unlock(&text_mutex);
1340 
1341 	kvfree(inst);
1342 
1343 	return ret;
1344 }
1345 
1346 static void store_args(struct jit_ctx *ctx, int nargs, int args_off)
1347 {
1348 	int i;
1349 
1350 	for (i = 0; i < nargs; i++) {
1351 		emit_insn(ctx, std, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off);
1352 		args_off -= 8;
1353 	}
1354 }
1355 
1356 static void restore_args(struct jit_ctx *ctx, int nargs, int args_off)
1357 {
1358 	int i;
1359 
1360 	for (i = 0; i < nargs; i++) {
1361 		emit_insn(ctx, ldd, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off);
1362 		args_off -= 8;
1363 	}
1364 }
1365 
1366 static int invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
1367 			   int args_off, int retval_off, int run_ctx_off, bool save_ret)
1368 {
1369 	int ret;
1370 	u32 *branch;
1371 	struct bpf_prog *p = l->link.prog;
1372 	int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
1373 
1374 	if (l->cookie) {
1375 		move_imm(ctx, LOONGARCH_GPR_T1, l->cookie, false);
1376 		emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off);
1377 	} else {
1378 		emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off);
1379 	}
1380 
1381 	/* arg1: prog */
1382 	move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false);
1383 	/* arg2: &run_ctx */
1384 	emit_insn(ctx, addid, LOONGARCH_GPR_A1, LOONGARCH_GPR_FP, -run_ctx_off);
1385 	ret = emit_call(ctx, (const u64)bpf_trampoline_enter(p));
1386 	if (ret)
1387 		return ret;
1388 
1389 	/* store prog start time */
1390 	move_reg(ctx, LOONGARCH_GPR_S1, LOONGARCH_GPR_A0);
1391 
1392 	/*
1393 	 * if (__bpf_prog_enter(prog) == 0)
1394 	 *      goto skip_exec_of_prog;
1395 	 */
1396 	branch = (u32 *)ctx->image + ctx->idx;
1397 	/* nop reserved for conditional jump */
1398 	emit_insn(ctx, nop);
1399 
1400 	/* arg1: &args_off */
1401 	emit_insn(ctx, addid, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -args_off);
1402 	if (!p->jited)
1403 		move_imm(ctx, LOONGARCH_GPR_A1, (const s64)p->insnsi, false);
1404 	ret = emit_call(ctx, (const u64)p->bpf_func);
1405 	if (ret)
1406 		return ret;
1407 
1408 	if (save_ret) {
1409 		emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
1410 		emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
1411 	}
1412 
1413 	/* update branch with beqz */
1414 	if (ctx->image) {
1415 		int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branch;
1416 		*branch = larch_insn_gen_beq(LOONGARCH_GPR_A0, LOONGARCH_GPR_ZERO, offset);
1417 	}
1418 
1419 	/* arg1: prog */
1420 	move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false);
1421 	/* arg2: prog start time */
1422 	move_reg(ctx, LOONGARCH_GPR_A1, LOONGARCH_GPR_S1);
1423 	/* arg3: &run_ctx */
1424 	emit_insn(ctx, addid, LOONGARCH_GPR_A2, LOONGARCH_GPR_FP, -run_ctx_off);
1425 	ret = emit_call(ctx, (const u64)bpf_trampoline_exit(p));
1426 
1427 	return ret;
1428 }
1429 
1430 static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
1431 			       int args_off, int retval_off, int run_ctx_off, u32 **branches)
1432 {
1433 	int i;
1434 
1435 	emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -retval_off);
1436 	for (i = 0; i < tl->nr_links; i++) {
1437 		invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, run_ctx_off, true);
1438 		emit_insn(ctx, ldd, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -retval_off);
1439 		branches[i] = (u32 *)ctx->image + ctx->idx;
1440 		emit_insn(ctx, nop);
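		/*
		 * Placeholder NOP; patched in __arch_prepare_bpf_trampoline()
		 * into a bne so that a non-zero return value skips the call to
		 * the original function.
		 */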
1441 	}
1442 }
1443 
1444 void *arch_alloc_bpf_trampoline(unsigned int size)
1445 {
1446 	return bpf_prog_pack_alloc(size, jit_fill_hole);
1447 }
1448 
1449 void arch_free_bpf_trampoline(void *image, unsigned int size)
1450 {
1451 	bpf_prog_pack_free(image, size);
1452 }
1453 
1454 /*
1455  * Sign-extend the register if necessary
1456  */
1457 static void sign_extend(struct jit_ctx *ctx, int rd, int rj, u8 size, bool sign)
1458 {
1459 	/* ABI requires unsigned char/short to be zero-extended */
1460 	if (!sign && (size == 1 || size == 2)) {
1461 		if (rd != rj)
1462 			move_reg(ctx, rd, rj);
1463 		return;
1464 	}
1465 
1466 	switch (size) {
1467 	case 1:
1468 		emit_insn(ctx, extwb, rd, rj);
1469 		break;
1470 	case 2:
1471 		emit_insn(ctx, extwh, rd, rj);
1472 		break;
1473 	case 4:
1474 		emit_insn(ctx, addiw, rd, rj, 0);
1475 		break;
1476 	case 8:
1477 		if (rd != rj)
1478 			move_reg(ctx, rd, rj);
1479 		break;
1480 	default:
1481 		pr_warn("bpf_jit: invalid size %d for sign_extend\n", size);
1482 	}
1483 }
1484 
1485 static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
1486 					 const struct btf_func_model *m, struct bpf_tramp_links *tlinks,
1487 					 void *func_addr, u32 flags)
1488 {
1489 	int i, ret, save_ret;
1490 	int stack_size, nargs;
1491 	int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
1492 	bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
1493 	void *orig_call = func_addr;
1494 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
1495 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
1496 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
1497 	u32 **branches = NULL;
1498 
1499 	/*
1500 	 * FP + 8       [ RA to parent func ] return address to parent
1501 	 *                    function
1502 	 * FP + 0       [ FP of parent func ] frame pointer of parent
1503 	 *                    function
1504 	 * FP - 8       [ T0 to traced func ] return address of traced
1505 	 *                    function
1506 	 * FP - 16      [ FP of traced func ] frame pointer of traced
1507 	 *                    function
1508 	 *
1509 	 * FP - retval_off  [ return value      ] BPF_TRAMP_F_CALL_ORIG or
1510 	 *                    BPF_TRAMP_F_RET_FENTRY_RET
1511 	 *                  [ argN              ]
1512 	 *                  [ ...               ]
1513 	 * FP - args_off    [ arg1              ]
1514 	 *
1515 	 * FP - nargs_off   [ regs count        ]
1516 	 *
1517 	 * FP - ip_off      [ traced func   ] BPF_TRAMP_F_IP_ARG
1518 	 *
1519 	 * FP - run_ctx_off [ bpf_tramp_run_ctx ]
1520 	 *
1521 	 * FP - sreg_off    [ callee saved reg  ]
1522 	 *
1523 	 * FP - tcc_ptr_off [ tail_call_cnt_ptr ]
1524 	 */
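	/*
	 * Each *_off value is the byte distance below FP of its area, so slots
	 * are addressed as FP - off; e.g. store_args() writes the arguments
	 * starting at FP - args_off and walking upward in 8-byte steps.
	 */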
1525 
1526 	if (m->nr_args > LOONGARCH_MAX_REG_ARGS)
1527 		return -ENOTSUPP;
1528 
1529 	/* FIXME: No support for struct arguments */
1530 	for (i = 0; i < m->nr_args; i++) {
1531 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
1532 			return -ENOTSUPP;
1533 	}
1534 
1535 	if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
1536 		return -ENOTSUPP;
1537 
1538 	/* Room in the trampoline frame to store the return address and frame pointer */
1539 	stack_size = 16;
1540 
1541 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
1542 	if (save_ret)
1543 		stack_size += 16; /* Save BPF R0 and A0 */
1544 
1545 	retval_off = stack_size;
1546 
1547 	/* Room in the trampoline frame to store the args */
1548 	nargs = m->nr_args;
1549 	stack_size += nargs * 8;
1550 	args_off = stack_size;
1551 
1552 	/* Room in the trampoline frame to store the number of args */
1553 	stack_size += 8;
1554 	nargs_off = stack_size;
1555 
1556 	/* Room in the trampoline frame to store the ip address */
1557 	if (flags & BPF_TRAMP_F_IP_ARG) {
1558 		stack_size += 8;
1559 		ip_off = stack_size;
1560 	}
1561 
1562 	/* Room in the trampoline frame to store struct bpf_tramp_run_ctx */
1563 	stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
1564 	run_ctx_off = stack_size;
1565 
1566 	stack_size += 8;
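	/* Room in the trampoline frame to save callee-saved register S1 */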
1567 	sreg_off = stack_size;
1568 
1569 	/* Room in the trampoline frame to store the tail_call_cnt_ptr */
1570 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
1571 		stack_size += 8;
1572 		tcc_ptr_off = stack_size;
1573 	}
1574 
1575 	stack_size = round_up(stack_size, 16);
1576 
1577 	if (is_struct_ops) {
1578 		/*
1579 		 * For the trampoline called directly, just handle
1580 		 * the frame of trampoline.
1581 		 */
1582 		emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size);
1583 		emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8);
1584 		emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
1585 		emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size);
1586 	} else {
1587 		/*
1588 		 * For the trampoline called from function entry,
1589 		 * the frame of traced function and the frame of
1590 		 * trampoline need to be considered.
1591 		 */
1592 		/* RA and FP for parent function */
1593 		emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -16);
1594 		emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8);
1595 		emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
1596 		emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 16);
1597 
1598 		/* RA and FP for traced function */
1599 		emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size);
1600 		emit_insn(ctx, std, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8);
1601 		emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
1602 		emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size);
1603 	}
1604 
1605 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
1606 		emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
1607 
1608 	/* Callee-saved register S1 carries the prog start time across calls */
1609 	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
1610 
1611 	/* store ip address of the traced function */
1612 	if (flags & BPF_TRAMP_F_IP_ARG) {
1613 		move_imm(ctx, LOONGARCH_GPR_T1, (const s64)func_addr, false);
1614 		emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -ip_off);
1615 	}
1616 
1617 	/* store the number of args */
1618 	move_imm(ctx, LOONGARCH_GPR_T1, nargs, false);
1619 	emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -nargs_off);
1620 
1621 	store_args(ctx, nargs, args_off);
1622 
1623 	/* To traced function */
1624 	/* Ftrace jump skips 2 NOP instructions */
1625 	if (is_kernel_text((unsigned long)orig_call))
1626 		orig_call += LOONGARCH_FENTRY_NBYTES;
1627 	/* Direct jump skips 5 NOP instructions */
1628 	else if (is_bpf_text_address((unsigned long)orig_call))
1629 		orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
1630 	/* Module tracing is not supported - it causes kernel lockups */
1631 	else if (is_module_text_address((unsigned long)orig_call))
1632 		return -ENOTSUPP;
1633 
1634 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
1635 		move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
1636 		ret = emit_call(ctx, (const u64)__bpf_tramp_enter);
1637 		if (ret)
1638 			return ret;
1639 	}
1640 
1641 	for (i = 0; i < fentry->nr_links; i++) {
1642 		ret = invoke_bpf_prog(ctx, fentry->links[i], args_off, retval_off,
1643 				      run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET);
1644 		if (ret)
1645 			return ret;
1646 	}
1647 	if (fmod_ret->nr_links) {
1648 		branches  = kcalloc(fmod_ret->nr_links, sizeof(u32 *), GFP_KERNEL);
1649 		if (!branches)
1650 			return -ENOMEM;
1651 
1652 		invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off, run_ctx_off, branches);
1653 	}
1654 
1655 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
1656 		restore_args(ctx, m->nr_args, args_off);
1657 
1658 		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
1659 			emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
1660 
1661 		ret = emit_call(ctx, (const u64)orig_call);
1662 		if (ret)
1663 			goto out;
1664 		emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
1665 		emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
1666 		im->ip_after_call = ctx->ro_image + ctx->idx;
1667 		/* Reserve space for the move_imm + jirl instructions */
1668 		for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
1669 			emit_insn(ctx, nop);
1670 	}
1671 
1672 	for (i = 0; ctx->image && i < fmod_ret->nr_links; i++) {
1673 		int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branches[i];
1674 		*branches[i] = larch_insn_gen_bne(LOONGARCH_GPR_T1, LOONGARCH_GPR_ZERO, offset);
1675 	}
1676 
1677 	for (i = 0; i < fexit->nr_links; i++) {
1678 		ret = invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, run_ctx_off, false);
1679 		if (ret)
1680 			goto out;
1681 	}
1682 
1683 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
1684 		im->ip_epilogue = ctx->ro_image + ctx->idx;
1685 		move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
1686 		ret = emit_call(ctx, (const u64)__bpf_tramp_exit);
1687 		if (ret)
1688 			goto out;
1689 	}
1690 
1691 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
1692 		restore_args(ctx, m->nr_args, args_off);
1693 
1694 	if (save_ret) {
1695 		emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
1696 		if (is_struct_ops)
1697 			sign_extend(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0],
1698 				    m->ret_size, m->ret_flags & BTF_FMODEL_SIGNED_ARG);
1699 		else
1700 			emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
1701 	}
1702 
1703 	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
1704 
1705 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
1706 		emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
1707 
1708 	if (is_struct_ops) {
1709 		/* trampoline called directly */
1710 		emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8);
1711 		emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
1712 		emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size);
1713 
1714 		emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
1715 	} else {
1716 		/* trampoline called from function entry */
1717 		emit_insn(ctx, ldd, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8);
1718 		emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
1719 		emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size);
1720 
1721 		emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8);
1722 		emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
1723 		emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16);
1724 
1725 		if (flags & BPF_TRAMP_F_SKIP_FRAME)
1726 			/* return to parent function */
1727 			emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
1728 		else
1729 			/* return to traced function */
1730 			emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0);
1731 	}
1732 
1733 	ret = ctx->idx;
1734 out:
1735 	kfree(branches);
1736 
1737 	return ret;
1738 }
1739 
1740 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
1741 				void *ro_image_end, const struct btf_func_model *m,
1742 				u32 flags, struct bpf_tramp_links *tlinks, void *func_addr)
1743 {
1744 	int ret, size;
1745 	void *image, *tmp;
1746 	struct jit_ctx ctx;
1747 
1748 	size = ro_image_end - ro_image;
1749 	image = kvmalloc(size, GFP_KERNEL);
1750 	if (!image)
1751 		return -ENOMEM;
1752 
1753 	ctx.image = (union loongarch_instruction *)image;
1754 	ctx.ro_image = (union loongarch_instruction *)ro_image;
1755 	ctx.idx = 0;
1756 
1757 	jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
1758 	ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags);
1759 	if (ret < 0)
1760 		goto out;
1761 
1762 	if (validate_code(&ctx) < 0) {
1763 		ret = -EINVAL;
1764 		goto out;
1765 	}
1766 
1767 	tmp = bpf_arch_text_copy(ro_image, image, size);
1768 	if (IS_ERR(tmp)) {
1769 		ret = PTR_ERR(tmp);
1770 		goto out;
1771 	}
1772 
1773 out:
1774 	kvfree(image);
1775 	return ret < 0 ? ret : size;
1776 }
1777 
1778 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
1779 			     struct bpf_tramp_links *tlinks, void *func_addr)
1780 {
1781 	int ret;
1782 	struct jit_ctx ctx;
1783 	struct bpf_tramp_image im;
1784 
1785 	ctx.image = NULL;
1786 	ctx.idx = 0;
1787 
1788 	ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags);
1789 
1790 	return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE;
1791 }
1792 
1793 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1794 {
1795 	bool tmp_blinded = false, extra_pass = false;
1796 	u8 *image_ptr;
1797 	int image_size, prog_size, extable_size;
1798 	struct jit_ctx ctx;
1799 	struct jit_data *jit_data;
1800 	struct bpf_binary_header *header;
1801 	struct bpf_prog *tmp, *orig_prog = prog;
1802 
1803 	/*
1804 	 * If BPF JIT was not enabled then we must fall back to
1805 	 * the interpreter.
1806 	 */
1807 	if (!prog->jit_requested)
1808 		return orig_prog;
1809 
1810 	tmp = bpf_jit_blind_constants(prog);
1811 	/*
1812 	 * If blinding was requested and we failed during blinding,
1813 	 * we must fall back to the interpreter. Otherwise, we save
1814 	 * the new JITed code.
1815 	 */
1816 	if (IS_ERR(tmp))
1817 		return orig_prog;
1818 
1819 	if (tmp != prog) {
1820 		tmp_blinded = true;
1821 		prog = tmp;
1822 	}
1823 
1824 	jit_data = prog->aux->jit_data;
1825 	if (!jit_data) {
1826 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1827 		if (!jit_data) {
1828 			prog = orig_prog;
1829 			goto out;
1830 		}
1831 		prog->aux->jit_data = jit_data;
1832 	}
1833 	if (jit_data->ctx.offset) {
1834 		ctx = jit_data->ctx;
1835 		image_ptr = jit_data->image;
1836 		header = jit_data->header;
1837 		extra_pass = true;
1838 		prog_size = sizeof(u32) * ctx.idx;
1839 		goto skip_init_ctx;
1840 	}
1841 
1842 	memset(&ctx, 0, sizeof(ctx));
1843 	ctx.prog = prog;
1844 
1845 	ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
1846 	if (ctx.offset == NULL) {
1847 		prog = orig_prog;
1848 		goto out_offset;
1849 	}
1850 
1851 	/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
1852 	build_prologue(&ctx);
1853 	if (build_body(&ctx, extra_pass)) {
1854 		prog = orig_prog;
1855 		goto out_offset;
1856 	}
1857 	ctx.epilogue_offset = ctx.idx;
1858 	build_epilogue(&ctx);
1859 
1860 	extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);
1861 
1862 	/* Now we know the actual image size.
1863 	 * As each LoongArch instruction is 32 bits long,
1864 	 * we translate the number of JITed instructions into
1865 	 * the size required to store the JITed code.
1866 	 */
1867 	prog_size = sizeof(u32) * ctx.idx;
1868 	image_size = prog_size + extable_size;
1869 	/* Now we know the size of the structure to make */
1870 	header = bpf_jit_binary_alloc(image_size, &image_ptr,
1871 				      sizeof(u32), jit_fill_hole);
1872 	if (header == NULL) {
1873 		prog = orig_prog;
1874 		goto out_offset;
1875 	}
1876 
1877 	/* 2. Now, the actual pass to generate final JIT code */
1878 	ctx.image = (union loongarch_instruction *)image_ptr;
1879 	if (extable_size)
1880 		prog->aux->extable = (void *)image_ptr + prog_size;
1881 
1882 skip_init_ctx:
1883 	ctx.idx = 0;
1884 	ctx.num_exentries = 0;
1885 
1886 	build_prologue(&ctx);
1887 	if (build_body(&ctx, extra_pass)) {
1888 		bpf_jit_binary_free(header);
1889 		prog = orig_prog;
1890 		goto out_offset;
1891 	}
1892 	build_epilogue(&ctx);
1893 
1894 	/* 3. Extra pass to validate JITed code */
1895 	if (validate_ctx(&ctx)) {
1896 		bpf_jit_binary_free(header);
1897 		prog = orig_prog;
1898 		goto out_offset;
1899 	}
1900 
1901 	/* And we're done */
1902 	if (bpf_jit_enable > 1)
1903 		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
1904 
1905 	/* Update the icache */
1906 	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
1907 
1908 	if (!prog->is_func || extra_pass) {
1909 		int err;
1910 
1911 		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
1912 			pr_err_once("multi-func JIT bug %d != %d\n",
1913 				    ctx.idx, jit_data->ctx.idx);
1914 			goto out_free;
1915 		}
1916 		err = bpf_jit_binary_lock_ro(header);
1917 		if (err) {
1918 			pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
1919 				    err);
1920 			goto out_free;
1921 		}
1922 	} else {
1923 		jit_data->ctx = ctx;
1924 		jit_data->image = image_ptr;
1925 		jit_data->header = header;
1926 	}
1927 	prog->jited = 1;
1928 	prog->jited_len = prog_size;
1929 	prog->bpf_func = (void *)ctx.image;
1930 
1931 	if (!prog->is_func || extra_pass) {
1932 		int i;
1933 
1934 		/* offset[prog->len] is the size of program */
1935 		for (i = 0; i <= prog->len; i++)
1936 			ctx.offset[i] *= LOONGARCH_INSN_SIZE;
1937 		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
1938 
1939 out_offset:
1940 		kvfree(ctx.offset);
1941 		kfree(jit_data);
1942 		prog->aux->jit_data = NULL;
1943 	}
1944 
1945 out:
1946 	if (tmp_blinded)
1947 		bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
1948 
1950 	return prog;
1951 
1952 out_free:
1953 	bpf_jit_binary_free(header);
1954 	prog->bpf_func = NULL;
1955 	prog->jited = 0;
1956 	prog->jited_len = 0;
1957 	goto out_offset;
1958 }
1959 
1960 bool bpf_jit_bypass_spec_v1(void)
1961 {
1962 	return true;
1963 }
1964 
1965 bool bpf_jit_bypass_spec_v4(void)
1966 {
1967 	return true;
1968 }
1969 
1970 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
1971 bool bpf_jit_supports_subprog_tailcalls(void)
1972 {
1973 	return true;
1974 }
1975