1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * BPF JIT compiler for LoongArch
4 *
5 * Copyright (C) 2022 Loongson Technology Corporation Limited
6 */
7 #include <linux/memory.h>
8 #include "bpf_jit.h"
9
10 #define LOONGARCH_MAX_REG_ARGS 8
11
12 #define LOONGARCH_LONG_JUMP_NINSNS 5
13 #define LOONGARCH_LONG_JUMP_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
14
15 #define LOONGARCH_FENTRY_NINSNS 2
16 #define LOONGARCH_FENTRY_NBYTES (LOONGARCH_FENTRY_NINSNS * 4)
17 #define LOONGARCH_BPF_FENTRY_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
18
19 #define REG_TCC LOONGARCH_GPR_A6
20 #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (round_up(stack, 16) - 80)
21
22 static const int regmap[] = {
23 /* return value from in-kernel function, and exit value for eBPF program */
24 [BPF_REG_0] = LOONGARCH_GPR_A5,
25 /* arguments from eBPF program to in-kernel function */
26 [BPF_REG_1] = LOONGARCH_GPR_A0,
27 [BPF_REG_2] = LOONGARCH_GPR_A1,
28 [BPF_REG_3] = LOONGARCH_GPR_A2,
29 [BPF_REG_4] = LOONGARCH_GPR_A3,
30 [BPF_REG_5] = LOONGARCH_GPR_A4,
31 /* callee saved registers that in-kernel function will preserve */
32 [BPF_REG_6] = LOONGARCH_GPR_S0,
33 [BPF_REG_7] = LOONGARCH_GPR_S1,
34 [BPF_REG_8] = LOONGARCH_GPR_S2,
35 [BPF_REG_9] = LOONGARCH_GPR_S3,
36 /* read-only frame pointer to access stack */
37 [BPF_REG_FP] = LOONGARCH_GPR_S4,
38 /* temporary register for blinding constants */
39 [BPF_REG_AX] = LOONGARCH_GPR_T0,
40 };
41
42 static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx, int *store_offset)
43 {
44 const struct bpf_prog *prog = ctx->prog;
45 const bool is_main_prog = !bpf_is_subprog(prog);
46
47 if (is_main_prog) {
48 /*
49 * LOONGARCH_GPR_T3 = MAX_TAIL_CALL_CNT
50 * if (REG_TCC > T3)
51 * std REG_TCC -> LOONGARCH_GPR_SP + store_offset
52 * else
53 * std REG_TCC -> LOONGARCH_GPR_SP + store_offset
54 * REG_TCC = LOONGARCH_GPR_SP + store_offset
55 *
56 * std REG_TCC -> LOONGARCH_GPR_SP + store_offset
57 *
58 * The purpose of this code is to first push the TCC onto the stack,
59 * and then push the address of the TCC onto the stack.
60 * When bpf2bpf calls and tail calls are used in combination,
61 * the value in REG_TCC may be either a count or an address,
62 * so the two cases must be distinguished and handled separately.
63 */
64 emit_insn(ctx, addid, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
65 *store_offset -= sizeof(long);
66
67 emit_cond_jmp(ctx, BPF_JGT, REG_TCC, LOONGARCH_GPR_T3, 4);
68
69 /*
70 * If REG_TCC <= MAX_TAIL_CALL_CNT, the value in REG_TCC is a count;
71 * push the tcc onto the stack
72 */
73 emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
74
75 /* Push the address of TCC into the REG_TCC */
76 emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
77
78 emit_uncond_jmp(ctx, 2);
79
80 /*
81 * If REG_TCC > MAX_TAIL_CALL_CNT, the value in REG_TCC is an address,
82 * push tcc_ptr into stack
83 */
84 emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
85 } else {
86 *store_offset -= sizeof(long);
87 emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
88 }
89
90 /* Push tcc_ptr into stack */
91 *store_offset -= sizeof(long);
92 emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
93 }
94
95 /*
96 * eBPF prog stack layout:
97 *
98 * high
99 * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
100 * | $ra |
101 * +-------------------------+
102 * | $fp |
103 * +-------------------------+
104 * | $s0 |
105 * +-------------------------+
106 * | $s1 |
107 * +-------------------------+
108 * | $s2 |
109 * +-------------------------+
110 * | $s3 |
111 * +-------------------------+
112 * | $s4 |
113 * +-------------------------+
114 * | $s5 |
115 * +-------------------------+
116 * | tcc |
117 * +-------------------------+
118 * | tcc_ptr |
119 * +-------------------------+ <--BPF_REG_FP
120 * | prog->aux->stack_depth |
121 * | (optional) |
122 * current $sp -------------> +-------------------------+
123 * low
124 */
125 static void build_prologue(struct jit_ctx *ctx)
126 {
127 int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
128 const struct bpf_prog *prog = ctx->prog;
129 const bool is_main_prog = !bpf_is_subprog(prog);
130
131 bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
132
133 /* To store ra, fp, s0, s1, s2, s3, s4, s5 */
134 stack_adjust += sizeof(long) * 8;
135
136 /* To store tcc and tcc_ptr */
137 stack_adjust += sizeof(long) * 2;
138
139 stack_adjust = round_up(stack_adjust, 16);
140 stack_adjust += bpf_stack_adjust;
141
142 /* Reserve space for the move_imm + jirl instruction */
143 for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
144 emit_insn(ctx, nop);
145
146 /*
147 * The instruction after the reserved NOPs initializes the tail
148 * call count (TCC) register to zero. On a tail call we skip this
149 * instruction, and the TCC pointer is passed in REG_TCC from the caller.
150 */
151 if (is_main_prog)
152 emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, 0);
153
154 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);
155
156 store_offset = stack_adjust - sizeof(long);
157 emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);
158
159 store_offset -= sizeof(long);
160 emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);
161
162 store_offset -= sizeof(long);
163 emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);
164
165 store_offset -= sizeof(long);
166 emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);
167
168 store_offset -= sizeof(long);
169 emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);
170
171 store_offset -= sizeof(long);
172 emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);
173
174 store_offset -= sizeof(long);
175 emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);
176
177 store_offset -= sizeof(long);
178 emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);
179
180 prepare_bpf_tail_call_cnt(ctx, &store_offset);
181
182 emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);
183
184 if (bpf_stack_adjust)
185 emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);
186
187 ctx->stack_size = stack_adjust;
188 }
189
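/*
 * Restore the callee-saved registers and the saved tcc/tcc_ptr, release
 * the stack frame, then either return to the caller (moving the eBPF
 * return value into $a0) or, for a tail call, jump into the next program
 * past its reserved NOPs and TCC initialization.
 */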
190 static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
191 {
192 int stack_adjust = ctx->stack_size;
193 int load_offset;
194
195 load_offset = stack_adjust - sizeof(long);
196 emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);
197
198 load_offset -= sizeof(long);
199 emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);
200
201 load_offset -= sizeof(long);
202 emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);
203
204 load_offset -= sizeof(long);
205 emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);
206
207 load_offset -= sizeof(long);
208 emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);
209
210 load_offset -= sizeof(long);
211 emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);
212
213 load_offset -= sizeof(long);
214 emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);
215
216 load_offset -= sizeof(long);
217 emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);
218
219 /*
220 * When pushing onto the stack, the order is tcc first, then tcc_ptr.
221 * When popping from the stack, tcc_ptr is popped first, followed by tcc.
222 */
223 load_offset -= 2 * sizeof(long);
224 emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset);
225
226 load_offset += sizeof(long);
227 emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset);
228
229 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);
230
231 if (!is_tail_call) {
232 /* Set return value */
233 emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
234 /* Return to the caller */
235 emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
236 } else {
237 /*
238 * Call the next bpf prog, skipping its reserved NOPs and the
239 * TCC initialization instruction.
240 */
241 emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 6);
242 }
243 }
244
245 static void build_epilogue(struct jit_ctx *ctx)
246 {
247 __build_epilogue(ctx, false);
248 }
249
250 bool bpf_jit_supports_kfunc_call(void)
251 {
252 return true;
253 }
254
255 bool bpf_jit_supports_far_kfunc_call(void)
256 {
257 return true;
258 }
259
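/*
 * Emit the inline tail call sequence: bound-check the index against
 * array->map.max_entries, increment the count behind tcc_ptr and bail out
 * once it exceeds MAX_TAIL_CALL_CNT, load prog->bpf_func from
 * array->ptrs[index], then enter it through the tail-call epilogue.
 */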
260 static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn)
261 {
262 int off, tc_ninsn = 0;
263 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size);
264 u8 a1 = LOONGARCH_GPR_A1;
265 u8 a2 = LOONGARCH_GPR_A2;
266 u8 t1 = LOONGARCH_GPR_T1;
267 u8 t2 = LOONGARCH_GPR_T2;
268 u8 t3 = LOONGARCH_GPR_T3;
269 const int idx0 = ctx->idx;
270
271 #define cur_offset (ctx->idx - idx0)
272 #define jmp_offset (tc_ninsn - (cur_offset))
273
274 /*
275 * a0: &ctx
276 * a1: &array
277 * a2: index
278 *
279 * if (index >= array->map.max_entries)
280 * goto out;
281 */
282 tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0];
283 off = offsetof(struct bpf_array, map.max_entries);
284 emit_insn(ctx, ldwu, t1, a1, off);
285 /* bgeu $a2, $t1, jmp_offset */
286 if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
287 goto toofar;
288
289 /*
290 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
291 * goto out;
292 */
293 emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off);
294 emit_insn(ctx, ldd, t3, REG_TCC, 0);
295 emit_insn(ctx, addid, t3, t3, 1);
296 emit_insn(ctx, std, t3, REG_TCC, 0);
297 emit_insn(ctx, addid, t2, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
298 if (emit_tailcall_jmp(ctx, BPF_JSGT, t3, t2, jmp_offset) < 0)
299 goto toofar;
300
301 /*
302 * prog = array->ptrs[index];
303 * if (!prog)
304 * goto out;
305 */
306 emit_insn(ctx, alsld, t2, a2, a1, 2);
307 off = offsetof(struct bpf_array, ptrs);
308 emit_insn(ctx, ldd, t2, t2, off);
309 /* beq $t2, $zero, jmp_offset */
310 if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
311 goto toofar;
312
313 /* goto *(prog->bpf_func + 4); */
314 off = offsetof(struct bpf_prog, bpf_func);
315 emit_insn(ctx, ldd, t3, t2, off);
316 __build_epilogue(ctx, true);
317
318 return 0;
319
320 toofar:
321 pr_info_once("tail_call: jump too far\n");
322 return -1;
323 #undef cur_offset
324 #undef jmp_offset
325 }
326
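/*
 * JIT BPF_STX | BPF_ATOMIC operations using the LoongArch AM* instructions
 * for add/and/or/xor/xchg (with or without BPF_FETCH) and an LL/SC loop
 * for cmpxchg; 32-bit results are zero-extended as eBPF requires.
 */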
327 static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
328 {
329 const u8 t1 = LOONGARCH_GPR_T1;
330 const u8 t2 = LOONGARCH_GPR_T2;
331 const u8 t3 = LOONGARCH_GPR_T3;
332 const u8 r0 = regmap[BPF_REG_0];
333 const u8 src = regmap[insn->src_reg];
334 const u8 dst = regmap[insn->dst_reg];
335 const s16 off = insn->off;
336 const s32 imm = insn->imm;
337 const bool isdw = BPF_SIZE(insn->code) == BPF_DW;
338
339 move_imm(ctx, t1, off, false);
340 emit_insn(ctx, addd, t1, dst, t1);
341 move_reg(ctx, t3, src);
342
343 switch (imm) {
344 /* lock *(size *)(dst + off) <op>= src */
345 case BPF_ADD:
346 if (isdw)
347 emit_insn(ctx, amaddd, t2, t1, src);
348 else
349 emit_insn(ctx, amaddw, t2, t1, src);
350 break;
351 case BPF_AND:
352 if (isdw)
353 emit_insn(ctx, amandd, t2, t1, src);
354 else
355 emit_insn(ctx, amandw, t2, t1, src);
356 break;
357 case BPF_OR:
358 if (isdw)
359 emit_insn(ctx, amord, t2, t1, src);
360 else
361 emit_insn(ctx, amorw, t2, t1, src);
362 break;
363 case BPF_XOR:
364 if (isdw)
365 emit_insn(ctx, amxord, t2, t1, src);
366 else
367 emit_insn(ctx, amxorw, t2, t1, src);
368 break;
369 /* src = atomic_fetch_<op>(dst + off, src) */
370 case BPF_ADD | BPF_FETCH:
371 if (isdw) {
372 emit_insn(ctx, amaddd, src, t1, t3);
373 } else {
374 emit_insn(ctx, amaddw, src, t1, t3);
375 emit_zext_32(ctx, src, true);
376 }
377 break;
378 case BPF_AND | BPF_FETCH:
379 if (isdw) {
380 emit_insn(ctx, amandd, src, t1, t3);
381 } else {
382 emit_insn(ctx, amandw, src, t1, t3);
383 emit_zext_32(ctx, src, true);
384 }
385 break;
386 case BPF_OR | BPF_FETCH:
387 if (isdw) {
388 emit_insn(ctx, amord, src, t1, t3);
389 } else {
390 emit_insn(ctx, amorw, src, t1, t3);
391 emit_zext_32(ctx, src, true);
392 }
393 break;
394 case BPF_XOR | BPF_FETCH:
395 if (isdw) {
396 emit_insn(ctx, amxord, src, t1, t3);
397 } else {
398 emit_insn(ctx, amxorw, src, t1, t3);
399 emit_zext_32(ctx, src, true);
400 }
401 break;
402 /* src = atomic_xchg(dst + off, src); */
403 case BPF_XCHG:
404 if (isdw) {
405 emit_insn(ctx, amswapd, src, t1, t3);
406 } else {
407 emit_insn(ctx, amswapw, src, t1, t3);
408 emit_zext_32(ctx, src, true);
409 }
410 break;
411 /* r0 = atomic_cmpxchg(dst + off, r0, src); */
412 case BPF_CMPXCHG:
413 move_reg(ctx, t2, r0);
414 if (isdw) {
415 emit_insn(ctx, lld, r0, t1, 0);
416 emit_insn(ctx, bne, t2, r0, 4);
417 move_reg(ctx, t3, src);
418 emit_insn(ctx, scd, t3, t1, 0);
419 emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
420 } else {
421 emit_insn(ctx, llw, r0, t1, 0);
422 emit_zext_32(ctx, t2, true);
423 emit_zext_32(ctx, r0, true);
424 emit_insn(ctx, bne, t2, r0, 4);
425 move_reg(ctx, t3, src);
426 emit_insn(ctx, scw, t3, t1, 0);
427 emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
428 emit_zext_32(ctx, r0, true);
429 }
430 break;
431 }
432 }
433
434 static bool is_signed_bpf_cond(u8 cond)
435 {
436 return cond == BPF_JSGT || cond == BPF_JSLT ||
437 cond == BPF_JSGE || cond == BPF_JSLE;
438 }
439
440 #define BPF_FIXUP_REG_MASK GENMASK(31, 27)
441 #define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
442
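/*
 * Exception fixup for BPF_PROBE_MEM* loads: zero the destination register
 * recorded in the fixup word and resume at the instruction following the
 * faulting load.
 */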
443 bool ex_handler_bpf(const struct exception_table_entry *ex,
444 struct pt_regs *regs)
445 {
446 int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
447 off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
448
449 regs->regs[dst_reg] = 0;
450 regs->csr_era = (unsigned long)&ex->fixup - offset;
451
452 return true;
453 }
454
455 /* For accesses to BTF pointers, add an entry to the exception table */
456 static int add_exception_handler(const struct bpf_insn *insn,
457 struct jit_ctx *ctx,
458 int dst_reg)
459 {
460 unsigned long pc;
461 off_t offset;
462 struct exception_table_entry *ex;
463
464 if (!ctx->image || !ctx->prog->aux->extable)
465 return 0;
466
467 if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
468 BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
469 return 0;
470
471 if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
472 return -EINVAL;
473
474 ex = &ctx->prog->aux->extable[ctx->num_exentries];
475 pc = (unsigned long)&ctx->image[ctx->idx - 1];
476
477 offset = pc - (long)&ex->insn;
478 if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
479 return -ERANGE;
480
481 ex->insn = offset;
482
483 /*
484 * Since the extable follows the program, the fixup offset is always
485 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
486 * to keep things simple, and put the destination register in the upper
487 * bits. We don't need to worry about buildtime or runtime sort
488 * modifying the upper bits because the table is already sorted, and
489 * isn't part of the main exception table.
490 */
491 offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
492 if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
493 return -ERANGE;
494
495 ex->type = EX_TYPE_BPF;
496 ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
497
498 ctx->num_exentries++;
499
500 return 0;
501 }
502
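/*
 * Translate a single eBPF instruction into LoongArch instructions.
 * Returns 0 on success, 1 if a second instruction slot was consumed
 * (BPF_LD | BPF_IMM | BPF_DW), or a negative error code.
 */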
503 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
504 {
505 u8 tm = -1;
506 u64 func_addr;
507 bool func_addr_fixed, sign_extend;
508 int i = insn - ctx->prog->insnsi;
509 int ret, jmp_offset, tcc_ptr_off;
510 const u8 code = insn->code;
511 const u8 cond = BPF_OP(code);
512 const u8 t1 = LOONGARCH_GPR_T1;
513 const u8 t2 = LOONGARCH_GPR_T2;
514 const u8 src = regmap[insn->src_reg];
515 const u8 dst = regmap[insn->dst_reg];
516 const s16 off = insn->off;
517 const s32 imm = insn->imm;
518 const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
519
520 switch (code) {
521 /* dst = src */
522 case BPF_ALU | BPF_MOV | BPF_X:
523 case BPF_ALU64 | BPF_MOV | BPF_X:
524 switch (off) {
525 case 0:
526 move_reg(ctx, dst, src);
527 emit_zext_32(ctx, dst, is32);
528 break;
529 case 8:
530 move_reg(ctx, t1, src);
531 emit_insn(ctx, extwb, dst, t1);
532 emit_zext_32(ctx, dst, is32);
533 break;
534 case 16:
535 move_reg(ctx, t1, src);
536 emit_insn(ctx, extwh, dst, t1);
537 emit_zext_32(ctx, dst, is32);
538 break;
539 case 32:
540 emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
541 break;
542 }
543 break;
544
545 /* dst = imm */
546 case BPF_ALU | BPF_MOV | BPF_K:
547 case BPF_ALU64 | BPF_MOV | BPF_K:
548 move_imm(ctx, dst, imm, is32);
549 break;
550
551 /* dst = dst + src */
552 case BPF_ALU | BPF_ADD | BPF_X:
553 case BPF_ALU64 | BPF_ADD | BPF_X:
554 emit_insn(ctx, addd, dst, dst, src);
555 emit_zext_32(ctx, dst, is32);
556 break;
557
558 /* dst = dst + imm */
559 case BPF_ALU | BPF_ADD | BPF_K:
560 case BPF_ALU64 | BPF_ADD | BPF_K:
561 if (is_signed_imm12(imm)) {
562 emit_insn(ctx, addid, dst, dst, imm);
563 } else {
564 move_imm(ctx, t1, imm, is32);
565 emit_insn(ctx, addd, dst, dst, t1);
566 }
567 emit_zext_32(ctx, dst, is32);
568 break;
569
570 /* dst = dst - src */
571 case BPF_ALU | BPF_SUB | BPF_X:
572 case BPF_ALU64 | BPF_SUB | BPF_X:
573 emit_insn(ctx, subd, dst, dst, src);
574 emit_zext_32(ctx, dst, is32);
575 break;
576
577 /* dst = dst - imm */
578 case BPF_ALU | BPF_SUB | BPF_K:
579 case BPF_ALU64 | BPF_SUB | BPF_K:
580 if (is_signed_imm12(-imm)) {
581 emit_insn(ctx, addid, dst, dst, -imm);
582 } else {
583 move_imm(ctx, t1, imm, is32);
584 emit_insn(ctx, subd, dst, dst, t1);
585 }
586 emit_zext_32(ctx, dst, is32);
587 break;
588
589 /* dst = dst * src */
590 case BPF_ALU | BPF_MUL | BPF_X:
591 case BPF_ALU64 | BPF_MUL | BPF_X:
592 emit_insn(ctx, muld, dst, dst, src);
593 emit_zext_32(ctx, dst, is32);
594 break;
595
596 /* dst = dst * imm */
597 case BPF_ALU | BPF_MUL | BPF_K:
598 case BPF_ALU64 | BPF_MUL | BPF_K:
599 move_imm(ctx, t1, imm, is32);
600 emit_insn(ctx, muld, dst, dst, t1);
601 emit_zext_32(ctx, dst, is32);
602 break;
603
604 /* dst = dst / src */
605 case BPF_ALU | BPF_DIV | BPF_X:
606 case BPF_ALU64 | BPF_DIV | BPF_X:
607 if (!off) {
608 emit_zext_32(ctx, dst, is32);
609 move_reg(ctx, t1, src);
610 emit_zext_32(ctx, t1, is32);
611 emit_insn(ctx, divdu, dst, dst, t1);
612 emit_zext_32(ctx, dst, is32);
613 } else {
614 emit_sext_32(ctx, dst, is32);
615 move_reg(ctx, t1, src);
616 emit_sext_32(ctx, t1, is32);
617 emit_insn(ctx, divd, dst, dst, t1);
618 emit_sext_32(ctx, dst, is32);
619 }
620 break;
621
622 /* dst = dst / imm */
623 case BPF_ALU | BPF_DIV | BPF_K:
624 case BPF_ALU64 | BPF_DIV | BPF_K:
625 if (!off) {
626 move_imm(ctx, t1, imm, is32);
627 emit_zext_32(ctx, dst, is32);
628 emit_insn(ctx, divdu, dst, dst, t1);
629 emit_zext_32(ctx, dst, is32);
630 } else {
631 move_imm(ctx, t1, imm, false);
632 emit_sext_32(ctx, t1, is32);
633 emit_sext_32(ctx, dst, is32);
634 emit_insn(ctx, divd, dst, dst, t1);
635 emit_sext_32(ctx, dst, is32);
636 }
637 break;
638
639 /* dst = dst % src */
640 case BPF_ALU | BPF_MOD | BPF_X:
641 case BPF_ALU64 | BPF_MOD | BPF_X:
642 if (!off) {
643 emit_zext_32(ctx, dst, is32);
644 move_reg(ctx, t1, src);
645 emit_zext_32(ctx, t1, is32);
646 emit_insn(ctx, moddu, dst, dst, t1);
647 emit_zext_32(ctx, dst, is32);
648 } else {
649 emit_sext_32(ctx, dst, is32);
650 move_reg(ctx, t1, src);
651 emit_sext_32(ctx, t1, is32);
652 emit_insn(ctx, modd, dst, dst, t1);
653 emit_sext_32(ctx, dst, is32);
654 }
655 break;
656
657 /* dst = dst % imm */
658 case BPF_ALU | BPF_MOD | BPF_K:
659 case BPF_ALU64 | BPF_MOD | BPF_K:
660 if (!off) {
661 move_imm(ctx, t1, imm, is32);
662 emit_zext_32(ctx, dst, is32);
663 emit_insn(ctx, moddu, dst, dst, t1);
664 emit_zext_32(ctx, dst, is32);
665 } else {
666 move_imm(ctx, t1, imm, false);
667 emit_sext_32(ctx, t1, is32);
668 emit_sext_32(ctx, dst, is32);
669 emit_insn(ctx, modd, dst, dst, t1);
670 emit_sext_32(ctx, dst, is32);
671 }
672 break;
673
674 /* dst = -dst */
675 case BPF_ALU | BPF_NEG:
676 case BPF_ALU64 | BPF_NEG:
677 move_imm(ctx, t1, imm, is32);
678 emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
679 emit_zext_32(ctx, dst, is32);
680 break;
681
682 /* dst = dst & src */
683 case BPF_ALU | BPF_AND | BPF_X:
684 case BPF_ALU64 | BPF_AND | BPF_X:
685 emit_insn(ctx, and, dst, dst, src);
686 emit_zext_32(ctx, dst, is32);
687 break;
688
689 /* dst = dst & imm */
690 case BPF_ALU | BPF_AND | BPF_K:
691 case BPF_ALU64 | BPF_AND | BPF_K:
692 if (is_unsigned_imm12(imm)) {
693 emit_insn(ctx, andi, dst, dst, imm);
694 } else {
695 move_imm(ctx, t1, imm, is32);
696 emit_insn(ctx, and, dst, dst, t1);
697 }
698 emit_zext_32(ctx, dst, is32);
699 break;
700
701 /* dst = dst | src */
702 case BPF_ALU | BPF_OR | BPF_X:
703 case BPF_ALU64 | BPF_OR | BPF_X:
704 emit_insn(ctx, or, dst, dst, src);
705 emit_zext_32(ctx, dst, is32);
706 break;
707
708 /* dst = dst | imm */
709 case BPF_ALU | BPF_OR | BPF_K:
710 case BPF_ALU64 | BPF_OR | BPF_K:
711 if (is_unsigned_imm12(imm)) {
712 emit_insn(ctx, ori, dst, dst, imm);
713 } else {
714 move_imm(ctx, t1, imm, is32);
715 emit_insn(ctx, or, dst, dst, t1);
716 }
717 emit_zext_32(ctx, dst, is32);
718 break;
719
720 /* dst = dst ^ src */
721 case BPF_ALU | BPF_XOR | BPF_X:
722 case BPF_ALU64 | BPF_XOR | BPF_X:
723 emit_insn(ctx, xor, dst, dst, src);
724 emit_zext_32(ctx, dst, is32);
725 break;
726
727 /* dst = dst ^ imm */
728 case BPF_ALU | BPF_XOR | BPF_K:
729 case BPF_ALU64 | BPF_XOR | BPF_K:
730 if (is_unsigned_imm12(imm)) {
731 emit_insn(ctx, xori, dst, dst, imm);
732 } else {
733 move_imm(ctx, t1, imm, is32);
734 emit_insn(ctx, xor, dst, dst, t1);
735 }
736 emit_zext_32(ctx, dst, is32);
737 break;
738
739 /* dst = dst << src (logical) */
740 case BPF_ALU | BPF_LSH | BPF_X:
741 emit_insn(ctx, sllw, dst, dst, src);
742 emit_zext_32(ctx, dst, is32);
743 break;
744
745 case BPF_ALU64 | BPF_LSH | BPF_X:
746 emit_insn(ctx, slld, dst, dst, src);
747 break;
748
749 /* dst = dst << imm (logical) */
750 case BPF_ALU | BPF_LSH | BPF_K:
751 emit_insn(ctx, slliw, dst, dst, imm);
752 emit_zext_32(ctx, dst, is32);
753 break;
754
755 case BPF_ALU64 | BPF_LSH | BPF_K:
756 emit_insn(ctx, sllid, dst, dst, imm);
757 break;
758
759 /* dst = dst >> src (logical) */
760 case BPF_ALU | BPF_RSH | BPF_X:
761 emit_insn(ctx, srlw, dst, dst, src);
762 emit_zext_32(ctx, dst, is32);
763 break;
764
765 case BPF_ALU64 | BPF_RSH | BPF_X:
766 emit_insn(ctx, srld, dst, dst, src);
767 break;
768
769 /* dst = dst >> imm (logical) */
770 case BPF_ALU | BPF_RSH | BPF_K:
771 emit_insn(ctx, srliw, dst, dst, imm);
772 emit_zext_32(ctx, dst, is32);
773 break;
774
775 case BPF_ALU64 | BPF_RSH | BPF_K:
776 emit_insn(ctx, srlid, dst, dst, imm);
777 break;
778
779 /* dst = dst >> src (arithmetic) */
780 case BPF_ALU | BPF_ARSH | BPF_X:
781 emit_insn(ctx, sraw, dst, dst, src);
782 emit_zext_32(ctx, dst, is32);
783 break;
784
785 case BPF_ALU64 | BPF_ARSH | BPF_X:
786 emit_insn(ctx, srad, dst, dst, src);
787 break;
788
789 /* dst = dst >> imm (arithmetic) */
790 case BPF_ALU | BPF_ARSH | BPF_K:
791 emit_insn(ctx, sraiw, dst, dst, imm);
792 emit_zext_32(ctx, dst, is32);
793 break;
794
795 case BPF_ALU64 | BPF_ARSH | BPF_K:
796 emit_insn(ctx, sraid, dst, dst, imm);
797 break;
798
799 /* dst = BSWAP##imm(dst) */
800 case BPF_ALU | BPF_END | BPF_FROM_LE:
801 switch (imm) {
802 case 16:
803 /* zero-extend 16 bits into 64 bits */
804 emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
805 break;
806 case 32:
807 /* zero-extend 32 bits into 64 bits */
808 emit_zext_32(ctx, dst, is32);
809 break;
810 case 64:
811 /* do nothing */
812 break;
813 }
814 break;
815
816 case BPF_ALU | BPF_END | BPF_FROM_BE:
817 case BPF_ALU64 | BPF_END | BPF_FROM_LE:
818 switch (imm) {
819 case 16:
820 emit_insn(ctx, revb2h, dst, dst);
821 /* zero-extend 16 bits into 64 bits */
822 emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
823 break;
824 case 32:
825 emit_insn(ctx, revb2w, dst, dst);
826 /* clear the upper 32 bits */
827 emit_zext_32(ctx, dst, true);
828 break;
829 case 64:
830 emit_insn(ctx, revbd, dst, dst);
831 break;
832 }
833 break;
834
835 /* PC += off if dst cond src */
836 case BPF_JMP | BPF_JEQ | BPF_X:
837 case BPF_JMP | BPF_JNE | BPF_X:
838 case BPF_JMP | BPF_JGT | BPF_X:
839 case BPF_JMP | BPF_JGE | BPF_X:
840 case BPF_JMP | BPF_JLT | BPF_X:
841 case BPF_JMP | BPF_JLE | BPF_X:
842 case BPF_JMP | BPF_JSGT | BPF_X:
843 case BPF_JMP | BPF_JSGE | BPF_X:
844 case BPF_JMP | BPF_JSLT | BPF_X:
845 case BPF_JMP | BPF_JSLE | BPF_X:
846 case BPF_JMP32 | BPF_JEQ | BPF_X:
847 case BPF_JMP32 | BPF_JNE | BPF_X:
848 case BPF_JMP32 | BPF_JGT | BPF_X:
849 case BPF_JMP32 | BPF_JGE | BPF_X:
850 case BPF_JMP32 | BPF_JLT | BPF_X:
851 case BPF_JMP32 | BPF_JLE | BPF_X:
852 case BPF_JMP32 | BPF_JSGT | BPF_X:
853 case BPF_JMP32 | BPF_JSGE | BPF_X:
854 case BPF_JMP32 | BPF_JSLT | BPF_X:
855 case BPF_JMP32 | BPF_JSLE | BPF_X:
856 jmp_offset = bpf2la_offset(i, off, ctx);
857 move_reg(ctx, t1, dst);
858 move_reg(ctx, t2, src);
859 if (is_signed_bpf_cond(BPF_OP(code))) {
860 emit_sext_32(ctx, t1, is32);
861 emit_sext_32(ctx, t2, is32);
862 } else {
863 emit_zext_32(ctx, t1, is32);
864 emit_zext_32(ctx, t2, is32);
865 }
866 if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
867 goto toofar;
868 break;
869
870 /* PC += off if dst cond imm */
871 case BPF_JMP | BPF_JEQ | BPF_K:
872 case BPF_JMP | BPF_JNE | BPF_K:
873 case BPF_JMP | BPF_JGT | BPF_K:
874 case BPF_JMP | BPF_JGE | BPF_K:
875 case BPF_JMP | BPF_JLT | BPF_K:
876 case BPF_JMP | BPF_JLE | BPF_K:
877 case BPF_JMP | BPF_JSGT | BPF_K:
878 case BPF_JMP | BPF_JSGE | BPF_K:
879 case BPF_JMP | BPF_JSLT | BPF_K:
880 case BPF_JMP | BPF_JSLE | BPF_K:
881 case BPF_JMP32 | BPF_JEQ | BPF_K:
882 case BPF_JMP32 | BPF_JNE | BPF_K:
883 case BPF_JMP32 | BPF_JGT | BPF_K:
884 case BPF_JMP32 | BPF_JGE | BPF_K:
885 case BPF_JMP32 | BPF_JLT | BPF_K:
886 case BPF_JMP32 | BPF_JLE | BPF_K:
887 case BPF_JMP32 | BPF_JSGT | BPF_K:
888 case BPF_JMP32 | BPF_JSGE | BPF_K:
889 case BPF_JMP32 | BPF_JSLT | BPF_K:
890 case BPF_JMP32 | BPF_JSLE | BPF_K:
891 jmp_offset = bpf2la_offset(i, off, ctx);
892 if (imm) {
893 move_imm(ctx, t1, imm, false);
894 tm = t1;
895 } else {
896 /* If imm is 0, simply use zero register. */
897 tm = LOONGARCH_GPR_ZERO;
898 }
899 move_reg(ctx, t2, dst);
900 if (is_signed_bpf_cond(BPF_OP(code))) {
901 emit_sext_32(ctx, tm, is32);
902 emit_sext_32(ctx, t2, is32);
903 } else {
904 emit_zext_32(ctx, tm, is32);
905 emit_zext_32(ctx, t2, is32);
906 }
907 if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
908 goto toofar;
909 break;
910
911 /* PC += off if dst & src */
912 case BPF_JMP | BPF_JSET | BPF_X:
913 case BPF_JMP32 | BPF_JSET | BPF_X:
914 jmp_offset = bpf2la_offset(i, off, ctx);
915 emit_insn(ctx, and, t1, dst, src);
916 emit_zext_32(ctx, t1, is32);
917 if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
918 goto toofar;
919 break;
920
921 /* PC += off if dst & imm */
922 case BPF_JMP | BPF_JSET | BPF_K:
923 case BPF_JMP32 | BPF_JSET | BPF_K:
924 jmp_offset = bpf2la_offset(i, off, ctx);
925 move_imm(ctx, t1, imm, is32);
926 emit_insn(ctx, and, t1, dst, t1);
927 emit_zext_32(ctx, t1, is32);
928 if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
929 goto toofar;
930 break;
931
932 /* PC += off */
933 case BPF_JMP | BPF_JA:
934 case BPF_JMP32 | BPF_JA:
935 if (BPF_CLASS(code) == BPF_JMP)
936 jmp_offset = bpf2la_offset(i, off, ctx);
937 else
938 jmp_offset = bpf2la_offset(i, imm, ctx);
939 if (emit_uncond_jmp(ctx, jmp_offset) < 0)
940 goto toofar;
941 break;
942
943 /* function call */
944 case BPF_JMP | BPF_CALL:
945 ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
946 &func_addr, &func_addr_fixed);
947 if (ret < 0)
948 return ret;
949
950 if (insn->src_reg == BPF_PSEUDO_CALL) {
951 tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size);
952 emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off);
953 }
954
955 move_addr(ctx, t1, func_addr);
956 emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
957
958 if (insn->src_reg != BPF_PSEUDO_CALL)
959 move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
960
961 break;
962
963 /* tail call */
964 case BPF_JMP | BPF_TAIL_CALL:
965 if (emit_bpf_tail_call(ctx, i) < 0)
966 return -EINVAL;
967 break;
968
969 /* function return */
970 case BPF_JMP | BPF_EXIT:
971 if (i == ctx->prog->len - 1)
972 break;
973
974 jmp_offset = epilogue_offset(ctx);
975 if (emit_uncond_jmp(ctx, jmp_offset) < 0)
976 goto toofar;
977 break;
978
979 /* dst = imm64 */
980 case BPF_LD | BPF_IMM | BPF_DW:
981 {
982 const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
983
984 if (bpf_pseudo_func(insn))
985 move_addr(ctx, dst, imm64);
986 else
987 move_imm(ctx, dst, imm64, is32);
988 return 1;
989 }
990
991 /* dst = *(size *)(src + off) */
992 case BPF_LDX | BPF_MEM | BPF_B:
993 case BPF_LDX | BPF_MEM | BPF_H:
994 case BPF_LDX | BPF_MEM | BPF_W:
995 case BPF_LDX | BPF_MEM | BPF_DW:
996 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
997 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
998 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
999 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1000 /* dst_reg = (s64)*(signed size *)(src_reg + off) */
1001 case BPF_LDX | BPF_MEMSX | BPF_B:
1002 case BPF_LDX | BPF_MEMSX | BPF_H:
1003 case BPF_LDX | BPF_MEMSX | BPF_W:
1004 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1005 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1006 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1007 sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
1008 BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
1009 switch (BPF_SIZE(code)) {
1010 case BPF_B:
1011 if (is_signed_imm12(off)) {
1012 if (sign_extend)
1013 emit_insn(ctx, ldb, dst, src, off);
1014 else
1015 emit_insn(ctx, ldbu, dst, src, off);
1016 } else {
1017 move_imm(ctx, t1, off, is32);
1018 if (sign_extend)
1019 emit_insn(ctx, ldxb, dst, src, t1);
1020 else
1021 emit_insn(ctx, ldxbu, dst, src, t1);
1022 }
1023 break;
1024 case BPF_H:
1025 if (is_signed_imm12(off)) {
1026 if (sign_extend)
1027 emit_insn(ctx, ldh, dst, src, off);
1028 else
1029 emit_insn(ctx, ldhu, dst, src, off);
1030 } else {
1031 move_imm(ctx, t1, off, is32);
1032 if (sign_extend)
1033 emit_insn(ctx, ldxh, dst, src, t1);
1034 else
1035 emit_insn(ctx, ldxhu, dst, src, t1);
1036 }
1037 break;
1038 case BPF_W:
1039 if (is_signed_imm12(off)) {
1040 if (sign_extend)
1041 emit_insn(ctx, ldw, dst, src, off);
1042 else
1043 emit_insn(ctx, ldwu, dst, src, off);
1044 } else {
1045 move_imm(ctx, t1, off, is32);
1046 if (sign_extend)
1047 emit_insn(ctx, ldxw, dst, src, t1);
1048 else
1049 emit_insn(ctx, ldxwu, dst, src, t1);
1050 }
1051 break;
1052 case BPF_DW:
1053 move_imm(ctx, t1, off, is32);
1054 emit_insn(ctx, ldxd, dst, src, t1);
1055 break;
1056 }
1057
1058 ret = add_exception_handler(insn, ctx, dst);
1059 if (ret)
1060 return ret;
1061 break;
1062
1063 /* *(size *)(dst + off) = imm */
1064 case BPF_ST | BPF_MEM | BPF_B:
1065 case BPF_ST | BPF_MEM | BPF_H:
1066 case BPF_ST | BPF_MEM | BPF_W:
1067 case BPF_ST | BPF_MEM | BPF_DW:
1068 switch (BPF_SIZE(code)) {
1069 case BPF_B:
1070 move_imm(ctx, t1, imm, is32);
1071 if (is_signed_imm12(off)) {
1072 emit_insn(ctx, stb, t1, dst, off);
1073 } else {
1074 move_imm(ctx, t2, off, is32);
1075 emit_insn(ctx, stxb, t1, dst, t2);
1076 }
1077 break;
1078 case BPF_H:
1079 move_imm(ctx, t1, imm, is32);
1080 if (is_signed_imm12(off)) {
1081 emit_insn(ctx, sth, t1, dst, off);
1082 } else {
1083 move_imm(ctx, t2, off, is32);
1084 emit_insn(ctx, stxh, t1, dst, t2);
1085 }
1086 break;
1087 case BPF_W:
1088 move_imm(ctx, t1, imm, is32);
1089 if (is_signed_imm12(off)) {
1090 emit_insn(ctx, stw, t1, dst, off);
1091 } else if (is_signed_imm14(off)) {
1092 emit_insn(ctx, stptrw, t1, dst, off);
1093 } else {
1094 move_imm(ctx, t2, off, is32);
1095 emit_insn(ctx, stxw, t1, dst, t2);
1096 }
1097 break;
1098 case BPF_DW:
1099 move_imm(ctx, t1, imm, is32);
1100 if (is_signed_imm12(off)) {
1101 emit_insn(ctx, std, t1, dst, off);
1102 } else if (is_signed_imm14(off)) {
1103 emit_insn(ctx, stptrd, t1, dst, off);
1104 } else {
1105 move_imm(ctx, t2, off, is32);
1106 emit_insn(ctx, stxd, t1, dst, t2);
1107 }
1108 break;
1109 }
1110 break;
1111
1112 /* *(size *)(dst + off) = src */
1113 case BPF_STX | BPF_MEM | BPF_B:
1114 case BPF_STX | BPF_MEM | BPF_H:
1115 case BPF_STX | BPF_MEM | BPF_W:
1116 case BPF_STX | BPF_MEM | BPF_DW:
1117 switch (BPF_SIZE(code)) {
1118 case BPF_B:
1119 if (is_signed_imm12(off)) {
1120 emit_insn(ctx, stb, src, dst, off);
1121 } else {
1122 move_imm(ctx, t1, off, is32);
1123 emit_insn(ctx, stxb, src, dst, t1);
1124 }
1125 break;
1126 case BPF_H:
1127 if (is_signed_imm12(off)) {
1128 emit_insn(ctx, sth, src, dst, off);
1129 } else {
1130 move_imm(ctx, t1, off, is32);
1131 emit_insn(ctx, stxh, src, dst, t1);
1132 }
1133 break;
1134 case BPF_W:
1135 if (is_signed_imm12(off)) {
1136 emit_insn(ctx, stw, src, dst, off);
1137 } else if (is_signed_imm14(off)) {
1138 emit_insn(ctx, stptrw, src, dst, off);
1139 } else {
1140 move_imm(ctx, t1, off, is32);
1141 emit_insn(ctx, stxw, src, dst, t1);
1142 }
1143 break;
1144 case BPF_DW:
1145 if (is_signed_imm12(off)) {
1146 emit_insn(ctx, std, src, dst, off);
1147 } else if (is_signed_imm14(off)) {
1148 emit_insn(ctx, stptrd, src, dst, off);
1149 } else {
1150 move_imm(ctx, t1, off, is32);
1151 emit_insn(ctx, stxd, src, dst, t1);
1152 }
1153 break;
1154 }
1155 break;
1156
1157 case BPF_STX | BPF_ATOMIC | BPF_W:
1158 case BPF_STX | BPF_ATOMIC | BPF_DW:
1159 emit_atomic(insn, ctx);
1160 break;
1161
1162 /* Speculation barrier */
1163 case BPF_ST | BPF_NOSPEC:
1164 break;
1165
1166 default:
1167 pr_err("bpf_jit: unknown opcode %02x\n", code);
1168 return -EINVAL;
1169 }
1170
1171 return 0;
1172
1173 toofar:
1174 pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
1175 return -E2BIG;
1176 }
1177
1178 static int build_body(struct jit_ctx *ctx, bool extra_pass)
1179 {
1180 int i;
1181 const struct bpf_prog *prog = ctx->prog;
1182
1183 for (i = 0; i < prog->len; i++) {
1184 const struct bpf_insn *insn = &prog->insnsi[i];
1185 int ret;
1186
1187 if (ctx->image == NULL)
1188 ctx->offset[i] = ctx->idx;
1189
1190 ret = build_insn(insn, ctx, extra_pass);
1191 if (ret > 0) {
1192 i++;
1193 if (ctx->image == NULL)
1194 ctx->offset[i] = ctx->idx;
1195 continue;
1196 }
1197 if (ret)
1198 return ret;
1199 }
1200
1201 if (ctx->image == NULL)
1202 ctx->offset[i] = ctx->idx;
1203
1204 return 0;
1205 }
1206
1207 /* Fill space with break instructions */
1208 static void jit_fill_hole(void *area, unsigned int size)
1209 {
1210 u32 *ptr;
1211
1212 /* We are guaranteed to have aligned memory */
1213 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
1214 *ptr++ = INSN_BREAK;
1215 }
1216
1217 static int validate_code(struct jit_ctx *ctx)
1218 {
1219 int i;
1220 union loongarch_instruction insn;
1221
1222 for (i = 0; i < ctx->idx; i++) {
1223 insn = ctx->image[i];
1224 /* Check INSN_BREAK */
1225 if (insn.word == INSN_BREAK)
1226 return -1;
1227 }
1228
1229 return 0;
1230 }
1231
1232 static int validate_ctx(struct jit_ctx *ctx)
1233 {
1234 if (validate_code(ctx))
1235 return -1;
1236
1237 if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
1238 return -1;
1239
1240 return 0;
1241 }
1242
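/* Emit a long jump to @target via $t1, writing the link address into @rd */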
1243 static int emit_jump_and_link(struct jit_ctx *ctx, u8 rd, u64 target)
1244 {
1245 if (!target) {
1246 pr_err("bpf_jit: invalid jump target address\n");
1247 return -EFAULT;
1248 }
1249
1250 move_imm(ctx, LOONGARCH_GPR_T1, target, false);
1251 emit_insn(ctx, jirl, rd, LOONGARCH_GPR_T1, 0);
1252
1253 return 0;
1254 }
1255
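/*
 * Fill a patch site buffer with either NOPs (no target) or a long
 * jump/call sequence to @target.
 */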
1256 static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
1257 {
1258 int i;
1259 struct jit_ctx ctx;
1260
1261 ctx.idx = 0;
1262 ctx.image = (union loongarch_instruction *)insns;
1263
1264 if (!target) {
1265 for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
1266 emit_insn((&ctx), nop);
1267 return 0;
1268 }
1269
1270 return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
1271 }
1272
1273 static int emit_call(struct jit_ctx *ctx, u64 addr)
1274 {
1275 return emit_jump_and_link(ctx, LOONGARCH_GPR_RA, addr);
1276 }
1277
1278 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
1279 {
1280 int ret;
1281
1282 mutex_lock(&text_mutex);
1283 ret = larch_insn_text_copy(dst, src, len);
1284 mutex_unlock(&text_mutex);
1285
1286 return ret ? ERR_PTR(-EINVAL) : dst;
1287 }
1288
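/*
 * Patch a BPF jump/call site: check that the old instructions match the
 * expected sequence, then install the new sequence (NOPs when new_addr
 * is NULL) under text_mutex.
 */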
1289 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
1290 void *old_addr, void *new_addr)
1291 {
1292 int ret;
1293 bool is_call = (poke_type == BPF_MOD_CALL);
1294 u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
1295 u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
1296
1297 if (!is_kernel_text((unsigned long)ip) &&
1298 !is_bpf_text_address((unsigned long)ip))
1299 return -ENOTSUPP;
1300
1301 ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
1302 if (ret)
1303 return ret;
1304
1305 if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES))
1306 return -EFAULT;
1307
1308 ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call);
1309 if (ret)
1310 return ret;
1311
1312 mutex_lock(&text_mutex);
1313 if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES))
1314 ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES);
1315 mutex_unlock(&text_mutex);
1316
1317 return ret;
1318 }
1319
1320 int bpf_arch_text_invalidate(void *dst, size_t len)
1321 {
1322 int i;
1323 int ret = 0;
1324 u32 *inst;
1325
1326 inst = kvmalloc(len, GFP_KERNEL);
1327 if (!inst)
1328 return -ENOMEM;
1329
1330 for (i = 0; i < (len / sizeof(u32)); i++)
1331 inst[i] = INSN_BREAK;
1332
1333 mutex_lock(&text_mutex);
1334 if (larch_insn_text_copy(dst, inst, len))
1335 ret = -EINVAL;
1336 mutex_unlock(&text_mutex);
1337
1338 kvfree(inst);
1339
1340 return ret;
1341 }
1342
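/* Spill the register arguments ($a0, $a1, ...) into the trampoline stack frame */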
1343 static void store_args(struct jit_ctx *ctx, int nargs, int args_off)
1344 {
1345 int i;
1346
1347 for (i = 0; i < nargs; i++) {
1348 emit_insn(ctx, std, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off);
1349 args_off -= 8;
1350 }
1351 }
1352
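/* Reload the register arguments saved by store_args() from the trampoline stack frame */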
1353 static void restore_args(struct jit_ctx *ctx, int nargs, int args_off)
1354 {
1355 int i;
1356
1357 for (i = 0; i < nargs; i++) {
1358 emit_insn(ctx, ldd, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off);
1359 args_off -= 8;
1360 }
1361 }
1362
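/*
 * Call one fentry/fexit/fmod_ret program from the trampoline: set up the
 * bpf_cookie in the run_ctx, call the enter helper, run the program unless
 * the enter helper returned 0, optionally save its return value, then call
 * the exit helper.
 */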
1363 static int invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
1364 int args_off, int retval_off, int run_ctx_off, bool save_ret)
1365 {
1366 int ret;
1367 u32 *branch;
1368 struct bpf_prog *p = l->link.prog;
1369 int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
1370
1371 if (l->cookie) {
1372 move_imm(ctx, LOONGARCH_GPR_T1, l->cookie, false);
1373 emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off);
1374 } else {
1375 emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off);
1376 }
1377
1378 /* arg1: prog */
1379 move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false);
1380 /* arg2: &run_ctx */
1381 emit_insn(ctx, addid, LOONGARCH_GPR_A1, LOONGARCH_GPR_FP, -run_ctx_off);
1382 ret = emit_call(ctx, (const u64)bpf_trampoline_enter(p));
1383 if (ret)
1384 return ret;
1385
1386 /* store prog start time */
1387 move_reg(ctx, LOONGARCH_GPR_S1, LOONGARCH_GPR_A0);
1388
1389 /*
1390 * if (__bpf_prog_enter(prog) == 0)
1391 * goto skip_exec_of_prog;
1392 */
1393 branch = (u32 *)ctx->image + ctx->idx;
1394 /* nop reserved for conditional jump */
1395 emit_insn(ctx, nop);
1396
1397 /* arg1: &args_off */
1398 emit_insn(ctx, addid, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -args_off);
1399 if (!p->jited)
1400 move_imm(ctx, LOONGARCH_GPR_A1, (const s64)p->insnsi, false);
1401 ret = emit_call(ctx, (const u64)p->bpf_func);
1402 if (ret)
1403 return ret;
1404
1405 if (save_ret) {
1406 emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
1407 emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
1408 }
1409
1410 /* update branch with beqz */
1411 if (ctx->image) {
1412 int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branch;
1413 *branch = larch_insn_gen_beq(LOONGARCH_GPR_A0, LOONGARCH_GPR_ZERO, offset);
1414 }
1415
1416 /* arg1: prog */
1417 move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false);
1418 /* arg2: prog start time */
1419 move_reg(ctx, LOONGARCH_GPR_A1, LOONGARCH_GPR_S1);
1420 /* arg3: &run_ctx */
1421 emit_insn(ctx, addid, LOONGARCH_GPR_A2, LOONGARCH_GPR_FP, -run_ctx_off);
1422 ret = emit_call(ctx, (const u64)bpf_trampoline_exit(p));
1423
1424 return ret;
1425 }
1426
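/*
 * Run the fmod_ret programs and record each branch site in @branches; the
 * NOPs are later patched to bne so that a non-zero return value skips the
 * call to the traced function.
 */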
1427 static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
1428 int args_off, int retval_off, int run_ctx_off, u32 **branches)
1429 {
1430 int i;
1431
1432 emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -retval_off);
1433 for (i = 0; i < tl->nr_links; i++) {
1434 invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, run_ctx_off, true);
1435 emit_insn(ctx, ldd, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -retval_off);
1436 branches[i] = (u32 *)ctx->image + ctx->idx;
1437 emit_insn(ctx, nop);
1438 }
1439 }
1440
1441 void *arch_alloc_bpf_trampoline(unsigned int size)
1442 {
1443 return bpf_prog_pack_alloc(size, jit_fill_hole);
1444 }
1445
1446 void arch_free_bpf_trampoline(void *image, unsigned int size)
1447 {
1448 bpf_prog_pack_free(image, size);
1449 }
1450
1451 static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
1452 const struct btf_func_model *m, struct bpf_tramp_links *tlinks,
1453 void *func_addr, u32 flags)
1454 {
1455 int i, ret, save_ret;
1456 int stack_size = 0, nargs = 0;
1457 int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
1458 bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
1459 void *orig_call = func_addr;
1460 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
1461 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
1462 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
1463 u32 **branches = NULL;
1464
1465 if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
1466 return -ENOTSUPP;
1467
1468 /*
1469 * FP + 8 [ RA to parent func ] return address to parent
1470 * function
1471 * FP + 0 [ FP of parent func ] frame pointer of parent
1472 * function
1473 * FP - 8 [ T0 to traced func ] return address of traced
1474 * function
1475 * FP - 16 [ FP of traced func ] frame pointer of traced
1476 * function
1477 *
1478 * FP - retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
1479 * BPF_TRAMP_F_RET_FENTRY_RET
1480 * [ argN ]
1481 * [ ... ]
1482 * FP - args_off [ arg1 ]
1483 *
1484 * FP - nargs_off [ regs count ]
1485 *
1486 * FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
1487 *
1488 * FP - run_ctx_off [ bpf_tramp_run_ctx ]
1489 *
1490 * FP - sreg_off [ callee saved reg ]
1491 *
1492 * FP - tcc_ptr_off [ tail_call_cnt_ptr ]
1493 */
1494
1495 if (m->nr_args > LOONGARCH_MAX_REG_ARGS)
1496 return -ENOTSUPP;
1497
1498 if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
1499 return -ENOTSUPP;
1500
1501 stack_size = 0;
1502
1503 /* Room of trampoline frame to store return address and frame pointer */
1504 stack_size += 16;
1505
1506 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
1507 if (save_ret) {
1508 /* Save BPF R0 and A0 */
1509 stack_size += 16;
1510 retval_off = stack_size;
1511 }
1512
1513 /* Room of trampoline frame to store args */
1514 nargs = m->nr_args;
1515 stack_size += nargs * 8;
1516 args_off = stack_size;
1517
1518 /* Room of trampoline frame to store args number */
1519 stack_size += 8;
1520 nargs_off = stack_size;
1521
1522 /* Room of trampoline frame to store ip address */
1523 if (flags & BPF_TRAMP_F_IP_ARG) {
1524 stack_size += 8;
1525 ip_off = stack_size;
1526 }
1527
1528 /* Room of trampoline frame to store struct bpf_tramp_run_ctx */
1529 stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
1530 run_ctx_off = stack_size;
1531
1532 stack_size += 8;
1533 sreg_off = stack_size;
1534
1535 /* Room of trampoline frame to store tail_call_cnt_ptr */
1536 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
1537 stack_size += 8;
1538 tcc_ptr_off = stack_size;
1539 }
1540
1541 stack_size = round_up(stack_size, 16);
1542
1543 if (is_struct_ops) {
1544 /*
1545 * For a trampoline that is called directly, only the
1546 * trampoline's own frame needs to be set up.
1547 */
1548 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size);
1549 emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8);
1550 emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
1551 emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size);
1552 } else {
1553 /*
1554 * For a trampoline called from a function entry,
1555 * the frames of both the traced function and the
1556 * trampoline need to be set up.
1557 */
1558 /* RA and FP for parent function */
1559 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -16);
1560 emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8);
1561 emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
1562 emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 16);
1563
1564 /* RA and FP for traced function */
1565 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size);
1566 emit_insn(ctx, std, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8);
1567 emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
1568 emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size);
1569 }
1570
1571 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
1572 emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
1573
1574 /* callee saved register S1 to pass start time */
1575 emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
1576
1577 /* store ip address of the traced function */
1578 if (flags & BPF_TRAMP_F_IP_ARG) {
1579 move_imm(ctx, LOONGARCH_GPR_T1, (const s64)func_addr, false);
1580 emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -ip_off);
1581 }
1582
1583 /* store nargs number */
1584 move_imm(ctx, LOONGARCH_GPR_T1, nargs, false);
1585 emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -nargs_off);
1586
1587 store_args(ctx, nargs, args_off);
1588
1589 /* To traced function */
1590 /* Ftrace jump skips 2 NOP instructions */
1591 if (is_kernel_text((unsigned long)orig_call))
1592 orig_call += LOONGARCH_FENTRY_NBYTES;
1593 /* Direct jump skips 5 NOP instructions */
1594 else if (is_bpf_text_address((unsigned long)orig_call))
1595 orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
1596
1597 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1598 move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
1599 ret = emit_call(ctx, (const u64)__bpf_tramp_enter);
1600 if (ret)
1601 return ret;
1602 }
1603
1604 for (i = 0; i < fentry->nr_links; i++) {
1605 ret = invoke_bpf_prog(ctx, fentry->links[i], args_off, retval_off,
1606 run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET);
1607 if (ret)
1608 return ret;
1609 }
1610 if (fmod_ret->nr_links) {
1611 branches = kcalloc(fmod_ret->nr_links, sizeof(u32 *), GFP_KERNEL);
1612 if (!branches)
1613 return -ENOMEM;
1614
1615 invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off, run_ctx_off, branches);
1616 }
1617
1618 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1619 restore_args(ctx, m->nr_args, args_off);
1620
1621 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
1622 emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
1623
1624 ret = emit_call(ctx, (const u64)orig_call);
1625 if (ret)
1626 goto out;
1627 emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
1628 emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
1629 im->ip_after_call = ctx->ro_image + ctx->idx;
1630 /* Reserve space for the move_imm + jirl instruction */
1631 for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
1632 emit_insn(ctx, nop);
1633 }
1634
1635 for (i = 0; ctx->image && i < fmod_ret->nr_links; i++) {
1636 int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branches[i];
1637 *branches[i] = larch_insn_gen_bne(LOONGARCH_GPR_T1, LOONGARCH_GPR_ZERO, offset);
1638 }
1639
1640 for (i = 0; i < fexit->nr_links; i++) {
1641 ret = invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, run_ctx_off, false);
1642 if (ret)
1643 goto out;
1644 }
1645
1646 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1647 im->ip_epilogue = ctx->ro_image + ctx->idx;
1648 move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
1649 ret = emit_call(ctx, (const u64)__bpf_tramp_exit);
1650 if (ret)
1651 goto out;
1652 }
1653
1654 if (flags & BPF_TRAMP_F_RESTORE_REGS)
1655 restore_args(ctx, m->nr_args, args_off);
1656
1657 if (save_ret) {
1658 emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
1659 emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
1660 }
1661
1662 emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
1663
1664 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
1665 emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
1666
1667 if (is_struct_ops) {
1668 /* trampoline called directly */
1669 emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8);
1670 emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
1671 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size);
1672
1673 emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
1674 } else {
1675 /* trampoline called from function entry */
1676 emit_insn(ctx, ldd, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8);
1677 emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
1678 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size);
1679
1680 emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8);
1681 emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
1682 emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16);
1683
1684 if (flags & BPF_TRAMP_F_SKIP_FRAME)
1685 /* return to parent function */
1686 emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
1687 else
1688 /* return to traced function */
1689 emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0);
1690 }
1691
1692 ret = ctx->idx;
1693 out:
1694 kfree(branches);
1695
1696 return ret;
1697 }
1698
1699 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
1700 void *ro_image_end, const struct btf_func_model *m,
1701 u32 flags, struct bpf_tramp_links *tlinks, void *func_addr)
1702 {
1703 int ret, size;
1704 void *image, *tmp;
1705 struct jit_ctx ctx;
1706
1707 size = ro_image_end - ro_image;
1708 image = kvmalloc(size, GFP_KERNEL);
1709 if (!image)
1710 return -ENOMEM;
1711
1712 ctx.image = (union loongarch_instruction *)image;
1713 ctx.ro_image = (union loongarch_instruction *)ro_image;
1714 ctx.idx = 0;
1715
1716 jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
1717 ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags);
1718 if (ret > 0 && validate_code(&ctx) < 0) {
1719 ret = -EINVAL;
1720 goto out;
1721 }
1722
1723 tmp = bpf_arch_text_copy(ro_image, image, size);
1724 if (IS_ERR(tmp)) {
1725 ret = PTR_ERR(tmp);
1726 goto out;
1727 }
1728
1729 bpf_flush_icache(ro_image, ro_image_end);
1730 out:
1731 kvfree(image);
1732 return ret < 0 ? ret : size;
1733 }
1734
1735 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
1736 struct bpf_tramp_links *tlinks, void *func_addr)
1737 {
1738 int ret;
1739 struct jit_ctx ctx;
1740 struct bpf_tramp_image im;
1741
1742 ctx.image = NULL;
1743 ctx.idx = 0;
1744
1745 ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags);
1746
1747 /* Page align */
1748 return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE);
1749 }
1750
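/*
 * Main JIT entry point: a first (fake) pass computes instruction offsets
 * and the image size, a second pass emits the final code into the
 * allocated image, and a validation pass checks the result before the
 * image is locked read-only.
 */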
1751 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1752 {
1753 bool tmp_blinded = false, extra_pass = false;
1754 u8 *image_ptr;
1755 int image_size, prog_size, extable_size;
1756 struct jit_ctx ctx;
1757 struct jit_data *jit_data;
1758 struct bpf_binary_header *header;
1759 struct bpf_prog *tmp, *orig_prog = prog;
1760
1761 /*
1762 * If BPF JIT was not enabled then we must fall back to
1763 * the interpreter.
1764 */
1765 if (!prog->jit_requested)
1766 return orig_prog;
1767
1768 tmp = bpf_jit_blind_constants(prog);
1769 /*
1770 * If blinding was requested and we failed during blinding,
1771 * we must fall back to the interpreter. Otherwise, we save
1772 * the new JITed code.
1773 */
1774 if (IS_ERR(tmp))
1775 return orig_prog;
1776
1777 if (tmp != prog) {
1778 tmp_blinded = true;
1779 prog = tmp;
1780 }
1781
1782 jit_data = prog->aux->jit_data;
1783 if (!jit_data) {
1784 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1785 if (!jit_data) {
1786 prog = orig_prog;
1787 goto out;
1788 }
1789 prog->aux->jit_data = jit_data;
1790 }
1791 if (jit_data->ctx.offset) {
1792 ctx = jit_data->ctx;
1793 image_ptr = jit_data->image;
1794 header = jit_data->header;
1795 extra_pass = true;
1796 prog_size = sizeof(u32) * ctx.idx;
1797 goto skip_init_ctx;
1798 }
1799
1800 memset(&ctx, 0, sizeof(ctx));
1801 ctx.prog = prog;
1802
1803 ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
1804 if (ctx.offset == NULL) {
1805 prog = orig_prog;
1806 goto out_offset;
1807 }
1808
1809 /* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
1810 build_prologue(&ctx);
1811 if (build_body(&ctx, extra_pass)) {
1812 prog = orig_prog;
1813 goto out_offset;
1814 }
1815 ctx.epilogue_offset = ctx.idx;
1816 build_epilogue(&ctx);
1817
1818 extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);
1819
1820 /* Now we know the actual image size.
1821 * As each LoongArch instruction is 32 bits long,
1822 * we translate the number of JITed instructions into
1823 * the size required to store the JITed code.
1824 */
1825 prog_size = sizeof(u32) * ctx.idx;
1826 image_size = prog_size + extable_size;
1827 /* Now we know the size of the structure to make */
1828 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1829 sizeof(u32), jit_fill_hole);
1830 if (header == NULL) {
1831 prog = orig_prog;
1832 goto out_offset;
1833 }
1834
1835 /* 2. Now, the actual pass to generate final JIT code */
1836 ctx.image = (union loongarch_instruction *)image_ptr;
1837 if (extable_size)
1838 prog->aux->extable = (void *)image_ptr + prog_size;
1839
1840 skip_init_ctx:
1841 ctx.idx = 0;
1842 ctx.num_exentries = 0;
1843
1844 build_prologue(&ctx);
1845 if (build_body(&ctx, extra_pass)) {
1846 bpf_jit_binary_free(header);
1847 prog = orig_prog;
1848 goto out_offset;
1849 }
1850 build_epilogue(&ctx);
1851
1852 /* 3. Extra pass to validate JITed code */
1853 if (validate_ctx(&ctx)) {
1854 bpf_jit_binary_free(header);
1855 prog = orig_prog;
1856 goto out_offset;
1857 }
1858
1859 /* And we're done */
1860 if (bpf_jit_enable > 1)
1861 bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
1862
1863 /* Update the icache */
1864 flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
1865
1866 if (!prog->is_func || extra_pass) {
1867 int err;
1868
1869 if (extra_pass && ctx.idx != jit_data->ctx.idx) {
1870 pr_err_once("multi-func JIT bug %d != %d\n",
1871 ctx.idx, jit_data->ctx.idx);
1872 goto out_free;
1873 }
1874 err = bpf_jit_binary_lock_ro(header);
1875 if (err) {
1876 pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
1877 err);
1878 goto out_free;
1879 }
1880 } else {
1881 jit_data->ctx = ctx;
1882 jit_data->image = image_ptr;
1883 jit_data->header = header;
1884 }
1885 prog->jited = 1;
1886 prog->jited_len = prog_size;
1887 prog->bpf_func = (void *)ctx.image;
1888
1889 if (!prog->is_func || extra_pass) {
1890 int i;
1891
1892 /* offset[prog->len] is the size of program */
1893 for (i = 0; i <= prog->len; i++)
1894 ctx.offset[i] *= LOONGARCH_INSN_SIZE;
1895 bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
1896
1897 out_offset:
1898 kvfree(ctx.offset);
1899 kfree(jit_data);
1900 prog->aux->jit_data = NULL;
1901 }
1902
1903 out:
1904 if (tmp_blinded)
1905 bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
1906
1907
1908 return prog;
1909
1910 out_free:
1911 bpf_jit_binary_free(header);
1912 prog->bpf_func = NULL;
1913 prog->jited = 0;
1914 prog->jited_len = 0;
1915 goto out_offset;
1916 }
1917
1918 bool bpf_jit_bypass_spec_v1(void)
1919 {
1920 return true;
1921 }
1922
1923 bool bpf_jit_bypass_spec_v4(void)
1924 {
1925 return true;
1926 }
1927
1928 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
1929 bool bpf_jit_supports_subprog_tailcalls(void)
1930 {
1931 return true;
1932 }
1933