// SPDX-License-Identifier: GPL-2.0
/* BPF JIT compiler for RV64G
 *
 * Copyright(c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 *
 */

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <asm/patch.h>
#include <asm/cfi.h>
#include <asm/percpu.h>
#include "bpf_jit.h"

#define RV_FENTRY_NINSNS 2

#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if the program makes calls */
#define RV_REG_ARENA RV_REG_S7 /* For storing arena_vm_start */

static const int regmap[] = {
	[BPF_REG_0] =	RV_REG_A5,
	[BPF_REG_1] =	RV_REG_A0,
	[BPF_REG_2] =	RV_REG_A1,
	[BPF_REG_3] =	RV_REG_A2,
	[BPF_REG_4] =	RV_REG_A3,
	[BPF_REG_5] =	RV_REG_A4,
	[BPF_REG_6] =	RV_REG_S1,
	[BPF_REG_7] =	RV_REG_S2,
	[BPF_REG_8] =	RV_REG_S3,
	[BPF_REG_9] =	RV_REG_S4,
	[BPF_REG_FP] =	RV_REG_S5,
	[BPF_REG_AX] =	RV_REG_T0,
};

static const int pt_regmap[] = {
	[RV_REG_A0] = offsetof(struct pt_regs, a0),
	[RV_REG_A1] = offsetof(struct pt_regs, a1),
	[RV_REG_A2] = offsetof(struct pt_regs, a2),
	[RV_REG_A3] = offsetof(struct pt_regs, a3),
	[RV_REG_A4] = offsetof(struct pt_regs, a4),
	[RV_REG_A5] = offsetof(struct pt_regs, a5),
	[RV_REG_S1] = offsetof(struct pt_regs, s1),
	[RV_REG_S2] = offsetof(struct pt_regs, s2),
	[RV_REG_S3] = offsetof(struct pt_regs, s3),
	[RV_REG_S4] = offsetof(struct pt_regs, s4),
	[RV_REG_S5] = offsetof(struct pt_regs, s5),
	[RV_REG_T0] = offsetof(struct pt_regs, t0),
};

enum {
	RV_CTX_F_SEEN_TAIL_CALL =	0,
	RV_CTX_F_SEEN_CALL =		RV_REG_RA,
	RV_CTX_F_SEEN_S1 =		RV_REG_S1,
	RV_CTX_F_SEEN_S2 =		RV_REG_S2,
	RV_CTX_F_SEEN_S3 =		RV_REG_S3,
	RV_CTX_F_SEEN_S4 =		RV_REG_S4,
	RV_CTX_F_SEEN_S5 =		RV_REG_S5,
	RV_CTX_F_SEEN_S6 =		RV_REG_S6,
};
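
/*
 * Note: the RV_CTX_F_SEEN_* values deliberately alias the corresponding
 * RISC-V register numbers, so a register number can double as the bit
 * index into ctx->flags (see bpf_to_rv_reg() and seen_reg() below).
 */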

static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
{
	u8 reg = regmap[bpf_reg];

	switch (reg) {
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		__set_bit(reg, &ctx->flags);
	}
	return reg;
};

static bool seen_reg(int reg, struct rv_jit_context *ctx)
{
	switch (reg) {
	case RV_CTX_F_SEEN_CALL:
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		return test_bit(reg, &ctx->flags);
	}
	return false;
}

static void mark_fp(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_S5, &ctx->flags);
}

static void mark_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static bool seen_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static void mark_tail_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static bool seen_tail_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
{
	mark_tail_call(ctx);

	if (seen_call(ctx)) {
		__set_bit(RV_CTX_F_SEEN_S6, &ctx->flags);
		return RV_REG_S6;
	}
	return RV_REG_A6;
}

static bool is_32b_int(s64 val)
{
	return -(1L << 31) <= val && val < (1L << 31);
}

static bool in_auipc_jalr_range(s64 val)
{
	/*
	 * auipc+jalr can reach any signed PC-relative offset in the range
	 * [-2^31 - 2^11, 2^31 - 2^11).
	 */
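	/*
	 * Sketch of the bound: auipc covers multiples of 2^12 in
	 * [-2^31, 2^31 - 2^12], and jalr then adds a signed 12-bit
	 * immediate in [-2^11, 2^11), shifting both endpoints down by 2^11.
	 */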
	return (-(1L << 31) - (1L << 11)) <= val &&
		val < ((1L << 31) - (1L << 11));
}

/* Modify rd pointer to alternate reg to avoid corrupting original reg */
static void emit_sextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
{
	emit_sextw(ra, *rd, ctx);
	*rd = ra;
}

static void emit_zextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
{
	emit_zextw(ra, *rd, ctx);
	*rd = ra;
}

/* Emit fixed-length instructions for address */
static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
{
	/*
	 * Use the ro_insns(RX) to calculate the offset as the BPF program will
	 * finally run from this memory region.
	 */
	u64 ip = (u64)(ctx->ro_insns + ctx->ninsns);
	s64 off = addr - ip;
	s64 upper = (off + (1 << 11)) >> 12;
	s64 lower = off & 0xfff;
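	/*
	 * Note: addi sign-extends its 12-bit immediate, so when bit 11 of
	 * off is set, the +(1 << 11) rounding in upper compensates and
	 * auipc+addi still reconstructs off exactly.
	 */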

	if (extra_pass && !in_auipc_jalr_range(off)) {
		pr_err("bpf-jit: target offset 0x%llx is out of range\n", off);
		return -ERANGE;
	}

	emit(rv_auipc(rd, upper), ctx);
	emit(rv_addi(rd, rd, lower), ctx);
	return 0;
}

/* Emit variable-length instructions for 32-bit and 64-bit imm */
static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
{
	/* Note that the immediate from the add is sign-extended,
	 * which means that we need to compensate this by adding 2^12,
	 * when the 12th bit is set. A simpler way of doing this, and
	 * getting rid of the check, is to just add 2**11 before the
	 * shift. The "Loading a 32-Bit constant" example from the
	 * "Computer Organization and Design, RISC-V edition" book by
	 * Patterson/Hennessy highlights this fact.
	 *
	 * This also means that we need to process LSB to MSB.
	 */
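	/*
	 * Worked example (illustrative): for val = 0xfff, lower sign-extends
	 * to -1 and upper = (0xfff + (1 << 11)) >> 12 = 1, so we emit
	 * "lui rd, 1; addiw rd, rd, -1", i.e. 0x1000 - 1 = 0xfff.
	 */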
	s64 upper = (val + (1 << 11)) >> 12;
	/* Sign-extend lower 12 bits to 64 bits since immediates for li, addiw,
	 * and addi are signed and RVC checks will perform signed comparisons.
	 */
	s64 lower = ((val & 0xfff) << 52) >> 52;
	int shift;

	if (is_32b_int(val)) {
		if (upper)
			emit_lui(rd, upper, ctx);

		if (!upper) {
			emit_li(rd, lower, ctx);
			return;
		}

		emit_addiw(rd, rd, lower, ctx);
		return;
	}

	shift = __ffs(upper);
	upper >>= shift;
	shift += 12;

	emit_imm(rd, upper, ctx);

	emit_slli(rd, rd, shift, ctx);
	if (lower)
		emit_addi(rd, rd, lower, ctx);
}

static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
	int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;

	if (seen_reg(RV_REG_RA, ctx)) {
		emit_ld(RV_REG_RA, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	emit_ld(RV_REG_FP, store_offset, RV_REG_SP, ctx);
	store_offset -= 8;
	if (seen_reg(RV_REG_S1, ctx)) {
		emit_ld(RV_REG_S1, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S2, ctx)) {
		emit_ld(RV_REG_S2, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S3, ctx)) {
		emit_ld(RV_REG_S3, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S4, ctx)) {
		emit_ld(RV_REG_S4, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S5, ctx)) {
		emit_ld(RV_REG_S5, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S6, ctx)) {
		emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (ctx->arena_vm_start) {
		emit_ld(RV_REG_ARENA, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}

	emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
	/* Set return value. */
	if (!is_tail_call)
		emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
	emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
		  is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */
		  ctx);
}

static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
		     struct rv_jit_context *ctx)
{
	switch (cond) {
	case BPF_JEQ:
		emit(rv_beq(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGT:
		emit(rv_bltu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JLT:
		emit(rv_bltu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGE:
		emit(rv_bgeu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JLE:
		emit(rv_bgeu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JNE:
		emit(rv_bne(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGT:
		emit(rv_blt(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JSLT:
		emit(rv_blt(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGE:
		emit(rv_bge(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSLE:
		emit(rv_bge(rs, rd, rvoff >> 1), ctx);
	}
}

static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
			struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (is_13b_int(rvoff)) {
		emit_bcc(cond, rd, rs, rvoff, ctx);
		return;
	}

	/* Adjust for jal */
	rvoff -= 4;

	/* Transform, e.g.:
	 *   bne rd,rs,foo
	 * to
	 *   beq rd,rs,<.L1>
	 *   (auipc foo)
	 *   jal(r) foo
	 * .L1
	 */
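	/*
	 * E.g. (illustrative): a far BPF_JGT becomes BPF_JLE after
	 * inversion, emitted as "bgeu rs, rd, 8" to skip over the
	 * following "jal zero, rvoff".
	 */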
	cond = invert_bpf_cond(cond);
	if (is_21b_int(rvoff)) {
		emit_bcc(cond, rd, rs, 8, ctx);
		emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx);
		return;
	}

	/* 32b case: no need for an additional rvoff adjustment, since we
	 * get that from the auipc at PC', where PC = PC' + 4.
	 */
	upper = (rvoff + (1 << 11)) >> 12;
	lower = rvoff & 0xfff;

	emit_bcc(cond, rd, rs, 12, ctx);
	emit(rv_auipc(RV_REG_T1, upper), ctx);
	emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx);
}

static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
	int tc_ninsn, off, start_insn = ctx->ninsns;
	u8 tcc = rv_tail_call_reg(ctx);

	/* a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
		   ctx->offset[0];
	emit_zextw(RV_REG_A2, RV_REG_A2, ctx);

	off = offsetof(struct bpf_array, map.max_entries);
	if (is_12b_check(off, insn))
		return -1;
	emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
	off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
	emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx);

	/* if (--TCC < 0)
	 *     goto out;
	 */
	emit_addi(RV_REG_TCC, tcc, -1, ctx);
	off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
	emit_branch(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);

	/* prog = array->ptrs[index];
	 * if (!prog)
	 *     goto out;
	 */
	emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
	emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
	off = offsetof(struct bpf_array, ptrs);
	if (is_12b_check(off, insn))
		return -1;
	emit_ld(RV_REG_T2, off, RV_REG_T2, ctx);
	off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
	emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	if (is_12b_check(off, insn))
		return -1;
	emit_ld(RV_REG_T3, off, RV_REG_T2, ctx);
	__build_epilogue(true, ctx);
	return 0;
}

static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
		      struct rv_jit_context *ctx)
{
	u8 code = insn->code;

	switch (code) {
	case BPF_JMP | BPF_JA:
	case BPF_JMP | BPF_CALL:
	case BPF_JMP | BPF_EXIT:
	case BPF_JMP | BPF_TAIL_CALL:
		break;
	default:
		*rd = bpf_to_rv_reg(insn->dst_reg, ctx);
	}

	if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
	    code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
	    code & BPF_LDX || code & BPF_STX)
		*rs = bpf_to_rv_reg(insn->src_reg, ctx);
}

static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
			      struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (rvoff && fixed_addr && is_21b_int(rvoff)) {
		emit(rv_jal(rd, rvoff >> 1), ctx);
		return 0;
	} else if (in_auipc_jalr_range(rvoff)) {
		upper = (rvoff + (1 << 11)) >> 12;
		lower = rvoff & 0xfff;
		emit(rv_auipc(RV_REG_T1, upper), ctx);
		emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
		return 0;
	}

	pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff);
	return -ERANGE;
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
		cond == BPF_JSGE || cond == BPF_JSLE;
}

static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
{
	s64 off = 0;
	u64 ip;

	if (addr && ctx->insns && ctx->ro_insns) {
		/*
		 * Use the ro_insns(RX) to calculate the offset as the BPF
		 * program will finally run from this memory region.
		 */
		ip = (u64)(long)(ctx->ro_insns + ctx->ninsns);
		off = addr - ip;
	}

	return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
}

static inline void emit_kcfi(u32 hash, struct rv_jit_context *ctx)
{
	if (IS_ENABLED(CONFIG_CFI_CLANG))
		emit(hash, ctx);
}

static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
			struct rv_jit_context *ctx)
{
	u8 r0;
	int jmp_offset;

	if (off) {
		if (is_12b_int(off)) {
			emit_addi(RV_REG_T1, rd, off, ctx);
		} else {
			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		}
		rd = RV_REG_T1;
	}

	switch (imm) {
	/* lock *(u32/u64 *)(dst_reg + off16) <op>= src_reg */
	case BPF_ADD:
		emit(is64 ? rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	case BPF_AND:
		emit(is64 ? rv_amoand_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoand_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	case BPF_OR:
		emit(is64 ? rv_amoor_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	case BPF_XOR:
		emit(is64 ? rv_amoxor_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoxor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
	case BPF_ADD | BPF_FETCH:
		emit(is64 ? rv_amoadd_d(rs, rs, rd, 1, 1) :
		     rv_amoadd_w(rs, rs, rd, 1, 1), ctx);
		if (!is64)
			emit_zextw(rs, rs, ctx);
		break;
	case BPF_AND | BPF_FETCH:
		emit(is64 ? rv_amoand_d(rs, rs, rd, 1, 1) :
		     rv_amoand_w(rs, rs, rd, 1, 1), ctx);
		if (!is64)
			emit_zextw(rs, rs, ctx);
		break;
	case BPF_OR | BPF_FETCH:
		emit(is64 ? rv_amoor_d(rs, rs, rd, 1, 1) :
		     rv_amoor_w(rs, rs, rd, 1, 1), ctx);
		if (!is64)
			emit_zextw(rs, rs, ctx);
		break;
	case BPF_XOR | BPF_FETCH:
		emit(is64 ? rv_amoxor_d(rs, rs, rd, 1, 1) :
		     rv_amoxor_w(rs, rs, rd, 1, 1), ctx);
		if (!is64)
			emit_zextw(rs, rs, ctx);
		break;
	/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
	case BPF_XCHG:
		emit(is64 ? rv_amoswap_d(rs, rs, rd, 1, 1) :
		     rv_amoswap_w(rs, rs, rd, 1, 1), ctx);
		if (!is64)
			emit_zextw(rs, rs, ctx);
		break;
	/* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
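	/*
	 * Emitted as an LR/SC retry loop, roughly (sketch):
	 *
	 *     mv/sext.w t2, r0
	 * 1:  lr.w/d    r0, (rd)
	 *     bne       t2, r0, 2f
	 *     sc.w/d    t3, rs, (rd)
	 *     bnez      t3, 1b
	 *     fence     rw, rw
	 * 2:
	 */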
	case BPF_CMPXCHG:
		r0 = bpf_to_rv_reg(BPF_REG_0, ctx);
		if (is64)
			emit_mv(RV_REG_T2, r0, ctx);
		else
			emit_addiw(RV_REG_T2, r0, 0, ctx);
		emit(is64 ? rv_lr_d(r0, 0, rd, 0, 0) :
		     rv_lr_w(r0, 0, rd, 0, 0), ctx);
		jmp_offset = ninsns_rvoff(8);
		emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
		emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
		     rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
		jmp_offset = ninsns_rvoff(-6);
		emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
		emit(rv_fence(0x3, 0x3), ctx);
		break;
	}
}

#define BPF_FIXUP_OFFSET_MASK   GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK      GENMASK(31, 27)
#define REG_DONT_CLEAR_MARKER	0	/* RV_REG_ZERO unused in pt_regmap */
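/*
 * ex->fixup encoding: bits [26:0] hold the positive distance from the
 * continuation pc to the fixup entry (the handler below computes
 * epc = &ex->fixup - offset), and bits [31:27] hold the pt_regmap index of
 * the register to clear on a fault (0 means don't clear anything).
 */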

bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
	int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);

	if (regs_offset != REG_DONT_CLEAR_MARKER)
		*(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
	regs->epc = (unsigned long)&ex->fixup - offset;

	return true;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct rv_jit_context *ctx,
				 int dst_reg, int insn_len)
{
	struct exception_table_entry *ex;
	unsigned long pc;
	off_t ins_offset;
	off_t fixup_offset;

	if (!ctx->insns || !ctx->ro_insns || !ctx->prog->aux->extable ||
	    (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
	     BPF_MODE(insn->code) != BPF_PROBE_MEM32))
		return 0;

	if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	if (WARN_ON_ONCE(insn_len > ctx->ninsns))
		return -EINVAL;

	if (WARN_ON_ONCE(!rvc_enabled() && insn_len == 1))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->nexentries];
	pc = (unsigned long)&ctx->ro_insns[ctx->ninsns - insn_len];

	/*
	 * This is the relative offset of the instruction that may fault from
	 * the exception table itself. This will be written to the exception
	 * table and if this instruction faults, the destination register will
	 * be set to '0' and the execution will jump to the next instruction.
	 */
	ins_offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN))
		return -ERANGE;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 *
	 * The fixup_offset is set to the next instruction from the instruction
	 * that may fault. The execution will jump to this after handling the
	 * fault.
	 */
	fixup_offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
		return -ERANGE;

	/*
	 * The offsets above have been calculated using the RO buffer, but we
	 * need to use the R/W buffer for writes, so switch ex to the R/W
	 * buffer before writing.
	 */
	ex = (void *)ctx->insns + ((void *)ex - (void *)ctx->ro_insns);

	ex->insn = ins_offset;

	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
		FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
	ex->type = EX_TYPE_BPF;

	ctx->nexentries++;
	return 0;
}

static int gen_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
{
	s64 rvoff;
	struct rv_jit_context ctx;

	ctx.ninsns = 0;
	ctx.insns = (u16 *)insns;

	if (!target) {
		emit(rv_nop(), &ctx);
		emit(rv_nop(), &ctx);
		return 0;
	}

	rvoff = (s64)(target - ip);
	return emit_jump_and_link(is_call ? RV_REG_T0 : RV_REG_ZERO, rvoff, false, &ctx);
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
		       void *old_addr, void *new_addr)
{
	u32 old_insns[RV_FENTRY_NINSNS], new_insns[RV_FENTRY_NINSNS];
	bool is_call = poke_type == BPF_MOD_CALL;
	int ret;

	if (!is_kernel_text((unsigned long)ip) &&
	    !is_bpf_text_address((unsigned long)ip))
		return -ENOTSUPP;

	ret = gen_jump_or_nops(old_addr, ip, old_insns, is_call);
	if (ret)
		return ret;

	if (memcmp(ip, old_insns, RV_FENTRY_NINSNS * 4))
		return -EFAULT;

	ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call);
	if (ret)
		return ret;

	cpus_read_lock();
	mutex_lock(&text_mutex);
	if (memcmp(ip, new_insns, RV_FENTRY_NINSNS * 4))
		ret = patch_text(ip, new_insns, RV_FENTRY_NINSNS);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return ret;
}

static void store_args(int nregs, int args_off, struct rv_jit_context *ctx)
{
	int i;

	for (i = 0; i < nregs; i++) {
		emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
		args_off -= 8;
	}
}

static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx)
{
	int i;

	for (i = 0; i < nregs; i++) {
		emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx);
		args_off -= 8;
	}
}

static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
			   int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
{
	int ret, branch_off;
	struct bpf_prog *p = l->link.prog;
	int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);

	if (l->cookie) {
		emit_imm(RV_REG_T1, l->cookie, ctx);
		emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_T1, ctx);
	} else {
		emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_ZERO, ctx);
	}

	/* arg1: prog */
	emit_imm(RV_REG_A0, (const s64)p, ctx);
	/* arg2: &run_ctx */
	emit_addi(RV_REG_A1, RV_REG_FP, -run_ctx_off, ctx);
	ret = emit_call((const u64)bpf_trampoline_enter(p), true, ctx);
	if (ret)
		return ret;

	/* store prog start time */
	emit_mv(RV_REG_S1, RV_REG_A0, ctx);

	/* if (__bpf_prog_enter(prog) == 0)
	 *	goto skip_exec_of_prog;
	 */
	branch_off = ctx->ninsns;
	/* nop reserved for conditional jump */
	emit(rv_nop(), ctx);

	/* arg1: &args_off */
	emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
	if (!p->jited)
		/* arg2: progs[i]->insnsi for interpreter */
		emit_imm(RV_REG_A1, (const s64)p->insnsi, ctx);
	ret = emit_call((const u64)p->bpf_func, true, ctx);
	if (ret)
		return ret;

	if (save_ret) {
		emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
		emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
	}

	/* update branch with beqz */
	if (ctx->insns) {
		int offset = ninsns_rvoff(ctx->ninsns - branch_off);
		u32 insn = rv_beq(RV_REG_A0, RV_REG_ZERO, offset >> 1);
		*(u32 *)(ctx->insns + branch_off) = insn;
	}

	/* arg1: prog */
	emit_imm(RV_REG_A0, (const s64)p, ctx);
	/* arg2: prog start time */
	emit_mv(RV_REG_A1, RV_REG_S1, ctx);
	/* arg3: &run_ctx */
	emit_addi(RV_REG_A2, RV_REG_FP, -run_ctx_off, ctx);
	ret = emit_call((const u64)bpf_trampoline_exit(p), true, ctx);

	return ret;
}

static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
					 const struct btf_func_model *m,
					 struct bpf_tramp_links *tlinks,
					 void *func_addr, u32 flags,
					 struct rv_jit_context *ctx)
{
	int i, ret, offset;
	int *branches_off = NULL;
	int stack_size = 0, nregs = m->nr_args;
	int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off;
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
	void *orig_call = func_addr;
	bool save_ret;
	u32 insn;

	/* Two types of generated trampoline stack layout:
	 *
	 * 1. trampoline called from function entry
	 * --------------------------------------
	 * FP + 8	    [ RA to parent func	] return address to parent
	 *					  function
	 * FP + 0	    [ FP of parent func ] frame pointer of parent
	 *					  function
	 * FP - 8           [ T0 to traced func ] return address of traced
	 *					  function
	 * FP - 16	    [ FP of traced func ] frame pointer of traced
	 *					  function
	 * --------------------------------------
	 *
	 * 2. trampoline called directly
	 * --------------------------------------
	 * FP - 8	    [ RA to caller func ] return address to caller
	 *					  function
	 * FP - 16	    [ FP of caller func	] frame pointer of caller
	 *					  function
	 * --------------------------------------
	 *
	 * FP - retval_off  [ return value      ] BPF_TRAMP_F_CALL_ORIG or
	 *					  BPF_TRAMP_F_RET_FENTRY_RET
	 *                  [ argN              ]
	 *                  [ ...               ]
	 * FP - args_off    [ arg1              ]
	 *
	 * FP - nregs_off   [ regs count        ]
	 *
	 * FP - ip_off      [ traced func	] BPF_TRAMP_F_IP_ARG
	 *
	 * FP - run_ctx_off [ bpf_tramp_run_ctx ]
	 *
	 * FP - sreg_off    [ callee saved reg	]
	 *
	 *		    [ pads              ] padding for 16-byte alignment
	 */

	if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
		return -ENOTSUPP;

	/* extra registers for struct arguments */
	for (i = 0; i < m->nr_args; i++)
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
			nregs += round_up(m->arg_size[i], 8) / 8 - 1;

	/* at most 8 arguments are passed in registers */
	if (nregs > 8)
		return -ENOTSUPP;

	/* room in the trampoline frame to store the return address and frame pointer */
	stack_size += 16;

	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret) {
		stack_size += 16; /* Save both A5 (BPF R0) and A0 */
		retval_off = stack_size;
	}

	stack_size += nregs * 8;
	args_off = stack_size;

	stack_size += 8;
	nregs_off = stack_size;

	if (flags & BPF_TRAMP_F_IP_ARG) {
		stack_size += 8;
		ip_off = stack_size;
	}

	stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
	run_ctx_off = stack_size;

	stack_size += 8;
	sreg_off = stack_size;

	stack_size = round_up(stack_size, STACK_ALIGN);

	if (!is_struct_ops) {
		/* For the trampoline called from function entry,
		 * the frame of traced function and the frame of
		 * trampoline need to be considered.
		 */
		emit_addi(RV_REG_SP, RV_REG_SP, -16, ctx);
		emit_sd(RV_REG_SP, 8, RV_REG_RA, ctx);
		emit_sd(RV_REG_SP, 0, RV_REG_FP, ctx);
		emit_addi(RV_REG_FP, RV_REG_SP, 16, ctx);

		emit_addi(RV_REG_SP, RV_REG_SP, -stack_size, ctx);
		emit_sd(RV_REG_SP, stack_size - 8, RV_REG_T0, ctx);
		emit_sd(RV_REG_SP, stack_size - 16, RV_REG_FP, ctx);
		emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx);
	} else {
		/* emit kcfi hash */
		emit_kcfi(cfi_get_func_hash(func_addr), ctx);
		/* For the trampoline called directly, just handle
		 * the frame of trampoline.
		 */
		emit_addi(RV_REG_SP, RV_REG_SP, -stack_size, ctx);
		emit_sd(RV_REG_SP, stack_size - 8, RV_REG_RA, ctx);
		emit_sd(RV_REG_SP, stack_size - 16, RV_REG_FP, ctx);
		emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx);
	}

	/* callee saved register S1 to pass start time */
	emit_sd(RV_REG_FP, -sreg_off, RV_REG_S1, ctx);

	/* store ip address of the traced function */
	if (flags & BPF_TRAMP_F_IP_ARG) {
		emit_imm(RV_REG_T1, (const s64)func_addr, ctx);
		emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
	}

	emit_li(RV_REG_T1, nregs, ctx);
	emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);

	store_args(nregs, args_off, ctx);

	/* skip to actual body of traced function */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		orig_call += RV_FENTRY_NINSNS * 4;

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		emit_imm(RV_REG_A0, (const s64)im, ctx);
		ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
		if (ret)
			return ret;
	}

	for (i = 0; i < fentry->nr_links; i++) {
		ret = invoke_bpf_prog(fentry->links[i], args_off, retval_off, run_ctx_off,
				      flags & BPF_TRAMP_F_RET_FENTRY_RET, ctx);
		if (ret)
			return ret;
	}

	if (fmod_ret->nr_links) {
		branches_off = kcalloc(fmod_ret->nr_links, sizeof(int), GFP_KERNEL);
		if (!branches_off)
			return -ENOMEM;

		/* cleanup to avoid garbage return value confusion */
		emit_sd(RV_REG_FP, -retval_off, RV_REG_ZERO, ctx);
		for (i = 0; i < fmod_ret->nr_links; i++) {
			ret = invoke_bpf_prog(fmod_ret->links[i], args_off, retval_off,
					      run_ctx_off, true, ctx);
			if (ret)
				goto out;
			emit_ld(RV_REG_T1, -retval_off, RV_REG_FP, ctx);
			branches_off[i] = ctx->ninsns;
			/* nop reserved for conditional jump */
			emit(rv_nop(), ctx);
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_args(nregs, args_off, ctx);
		ret = emit_call((const u64)orig_call, true, ctx);
		if (ret)
			goto out;
		emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
		emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
		im->ip_after_call = ctx->insns + ctx->ninsns;
		/* 2 nops reserved for auipc+jalr pair */
		emit(rv_nop(), ctx);
		emit(rv_nop(), ctx);
	}

	/* update branches saved in invoke_bpf_mod_ret with bnez */
	for (i = 0; ctx->insns && i < fmod_ret->nr_links; i++) {
		offset = ninsns_rvoff(ctx->ninsns - branches_off[i]);
		insn = rv_bne(RV_REG_T1, RV_REG_ZERO, offset >> 1);
		*(u32 *)(ctx->insns + branches_off[i]) = insn;
	}

	for (i = 0; i < fexit->nr_links; i++) {
		ret = invoke_bpf_prog(fexit->links[i], args_off, retval_off,
				      run_ctx_off, false, ctx);
		if (ret)
			goto out;
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = ctx->insns + ctx->ninsns;
		emit_imm(RV_REG_A0, (const s64)im, ctx);
		ret = emit_call((const u64)__bpf_tramp_exit, true, ctx);
		if (ret)
			goto out;
	}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_args(nregs, args_off, ctx);

	if (save_ret) {
		emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
		emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx);
	}

	emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);

	if (!is_struct_ops) {
		/* trampoline called from function entry */
		emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx);
		emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
		emit_addi(RV_REG_SP, RV_REG_SP, stack_size, ctx);

		emit_ld(RV_REG_RA, 8, RV_REG_SP, ctx);
		emit_ld(RV_REG_FP, 0, RV_REG_SP, ctx);
		emit_addi(RV_REG_SP, RV_REG_SP, 16, ctx);

		if (flags & BPF_TRAMP_F_SKIP_FRAME)
			/* return to parent function */
			emit_jalr(RV_REG_ZERO, RV_REG_RA, 0, ctx);
		else
			/* return to traced function */
			emit_jalr(RV_REG_ZERO, RV_REG_T0, 0, ctx);
	} else {
		/* trampoline called directly */
		emit_ld(RV_REG_RA, stack_size - 8, RV_REG_SP, ctx);
		emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
		emit_addi(RV_REG_SP, RV_REG_SP, stack_size, ctx);

		emit_jalr(RV_REG_ZERO, RV_REG_RA, 0, ctx);
	}

	ret = ctx->ninsns;
out:
	kfree(branches_off);
	return ret;
}

int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr)
{
	struct bpf_tramp_image im;
	struct rv_jit_context ctx;
	int ret;

	ctx.ninsns = 0;
	ctx.insns = NULL;
	ctx.ro_insns = NULL;
	ret = __arch_prepare_bpf_trampoline(&im, m, tlinks, func_addr, flags, &ctx);

	return ret < 0 ? ret : ninsns_rvoff(ctx.ninsns);
}

int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
				void *image_end, const struct btf_func_model *m,
				u32 flags, struct bpf_tramp_links *tlinks,
				void *func_addr)
{
	int ret;
	struct rv_jit_context ctx;

	ctx.ninsns = 0;
	/*
	 * The bpf_int_jit_compile() uses a RW buffer (ctx.insns) to write the
	 * JITed instructions and later copies it to a RX region (ctx.ro_insns).
	 * It also uses ctx.ro_insns to calculate offsets for jumps etc. As the
	 * trampoline image uses the same memory area for writing and execution,
	 * both ctx.insns and ctx.ro_insns can be set to image.
	 */
	ctx.insns = image;
	ctx.ro_insns = image;
	ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
	if (ret < 0)
		return ret;

	bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns);

	return ninsns_rvoff(ret);
}

int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
		      bool extra_pass)
{
	bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
		    BPF_CLASS(insn->code) == BPF_JMP;
	int s, e, rvoff, ret, i = insn - ctx->prog->insnsi;
	struct bpf_prog_aux *aux = ctx->prog->aux;
	u8 rd = -1, rs = -1, code = insn->code;
	s16 off = insn->off;
	s32 imm = insn->imm;

	init_regs(&rd, &rs, insn, ctx);

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		if (insn_is_cast_user(insn)) {
			emit_mv(RV_REG_T1, rs, ctx);
			emit_zextw(RV_REG_T1, RV_REG_T1, ctx);
			emit_imm(rd, (ctx->user_vm_start >> 32) << 32, ctx);
			emit(rv_beq(RV_REG_T1, RV_REG_ZERO, 4), ctx);
			emit_or(RV_REG_T1, rd, RV_REG_T1, ctx);
			emit_mv(rd, RV_REG_T1, ctx);
			break;
		} else if (insn_is_mov_percpu_addr(insn)) {
			if (rd != rs)
				emit_mv(rd, rs, ctx);
#ifdef CONFIG_SMP
			/* Load current CPU number in T1 */
			emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
				RV_REG_TP, ctx);
			/* << 3 because offsets are 8 bytes */
			emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx);
			/* Load address of __per_cpu_offset array in T2 */
			emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
			/* Add offset of current CPU to __per_cpu_offset */
			emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx);
			/* Load __per_cpu_offset[cpu] in T1 */
			emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx);
			/* Add the offset to Rd */
			emit_add(rd, rd, RV_REG_T1, ctx);
#endif
		}
		if (imm == 1) {
			/* Special mov32 for zext */
			emit_zextw(rd, rd, ctx);
			break;
		}
		switch (insn->off) {
		case 0:
			emit_mv(rd, rs, ctx);
			break;
		case 8:
			emit_sextb(rd, rs, ctx);
			break;
		case 16:
			emit_sexth(rd, rs, ctx);
			break;
		case 32:
			emit_sextw(rd, rs, ctx);
			break;
		}
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;

	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_add(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		if (is64)
			emit_sub(rd, rd, rs, ctx);
		else
			emit_subw(rd, rd, rs, ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_and(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_or(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_xor(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		if (off)
			emit(is64 ? rv_div(rd, rd, rs) : rv_divw(rd, rd, rs), ctx);
		else
			emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		if (off)
			emit(is64 ? rv_rem(rd, rd, rs) : rv_remw(rd, rd, rs), ctx);
		else
			emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit_sub(rd, RV_REG_ZERO, rd, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			emit_zexth(rd, rd, ctx);
			break;
		case 32:
			if (!aux->verifier_zext)
				emit_zextw(rd, rd, ctx);
			break;
		case 64:
			/* Do nothing */
			break;
		}
		break;
	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
		emit_bswap(rd, imm, ctx);
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_imm(rd, imm, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;

	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_12b_int(imm)) {
			emit_addi(rd, rd, imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_add(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_12b_int(-imm)) {
			emit_addi(rd, rd, -imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_sub(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_12b_int(imm)) {
			emit_andi(rd, rd, imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_and(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_ori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_or(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_xori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_xor(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
		     rv_mulw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		if (off)
			emit(is64 ? rv_div(rd, rd, RV_REG_T1) :
			     rv_divw(rd, rd, RV_REG_T1), ctx);
		else
			emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
			     rv_divuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		if (off)
			emit(is64 ? rv_rem(rd, rd, RV_REG_T1) :
			     rv_remw(rd, rd, RV_REG_T1), ctx);
		else
			emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
			     rv_remuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_slli(rd, rd, imm, ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		if (is64)
			emit_srli(rd, rd, imm, ctx);
		else
			emit(rv_srliw(rd, rd, imm), ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		if (is64)
			emit_srai(rd, rd, imm, ctx);
		else
			emit(rv_sraiw(rd, rd, imm), ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zextw(rd, rd, ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
	case BPF_JMP32 | BPF_JA:
		if (BPF_CLASS(code) == BPF_JMP)
			rvoff = rv_offset(i, off, ctx);
		else
			rvoff = rv_offset(i, imm, ctx);
		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
		if (ret)
			return ret;
		break;

	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		rvoff = rv_offset(i, off, ctx);
		if (!is64) {
			s = ctx->ninsns;
			if (is_signed_bpf_cond(BPF_OP(code))) {
				emit_sextw_alt(&rs, RV_REG_T1, ctx);
				emit_sextw_alt(&rd, RV_REG_T2, ctx);
			} else {
				emit_zextw_alt(&rs, RV_REG_T1, ctx);
				emit_zextw_alt(&rd, RV_REG_T2, ctx);
			}
			e = ctx->ninsns;

			/* Adjust for extra insns */
			rvoff -= ninsns_rvoff(e - s);
		}

		if (BPF_OP(code) == BPF_JSET) {
			/* Adjust for and */
			rvoff -= 4;
			emit_and(RV_REG_T1, rd, rs, ctx);
			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
		} else {
			emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		}
		break;

	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		if (imm)
			emit_imm(RV_REG_T1, imm, ctx);
		rs = imm ? RV_REG_T1 : RV_REG_ZERO;
		if (!is64) {
			if (is_signed_bpf_cond(BPF_OP(code))) {
				emit_sextw_alt(&rd, RV_REG_T2, ctx);
				/* rs has been sign extended */
			} else {
				emit_zextw_alt(&rd, RV_REG_T2, ctx);
				if (imm)
					emit_zextw(rs, rs, ctx);
			}
		}
		e = ctx->ninsns;

		/* Adjust for extra insns */
		rvoff -= ninsns_rvoff(e - s);
		emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		break;

	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		if (is_12b_int(imm)) {
			emit_andi(RV_REG_T1, rd, imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_and(RV_REG_T1, rd, RV_REG_T1, ctx);
		}
		/* For jset32, we should clear the upper 32 bits of t1, but
		 * sign-extension is sufficient here and saves one instruction,
		 * as t1 is used only in comparison against zero.
		 */
		if (!is64 && imm < 0)
			emit_sextw(RV_REG_T1, RV_REG_T1, ctx);
		e = ctx->ninsns;
		rvoff -= ninsns_rvoff(e - s);
		emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		bool fixed_addr;
		u64 addr;

		/* Inline calls to bpf_get_smp_processor_id()
		 *
		 * RV_REG_TP holds the address of the current CPU's task_struct and thread_info is
		 * at offset 0 in task_struct.
		 * Load cpu from thread_info:
		 *     Set R0 to ((struct thread_info *)(RV_REG_TP))->cpu
		 *
		 * This replicates the implementation of raw_smp_processor_id() on RISC-V.
		 */
1507 		if (insn->src_reg == 0 && insn->imm == BPF_FUNC_get_smp_processor_id) {
1508 			/* Load current CPU number in R0 */
1509 			emit_ld(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
1510 				RV_REG_TP, ctx);
1511 			break;
1512 		}
1513 
1514 		mark_call(ctx);
1515 		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
1516 					    &addr, &fixed_addr);
1517 		if (ret < 0)
1518 			return ret;
1519 
1520 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
1521 			const struct btf_func_model *fm;
1522 			int idx;
1523 
1524 			fm = bpf_jit_find_kfunc_model(ctx->prog, insn);
1525 			if (!fm)
1526 				return -EINVAL;
1527 
1528 			for (idx = 0; idx < fm->nr_args; idx++) {
1529 				u8 reg = bpf_to_rv_reg(BPF_REG_1 + idx, ctx);
1530 
1531 				if (fm->arg_size[idx] == sizeof(int))
1532 					emit_sextw(reg, reg, ctx);
1533 			}
1534 		}
1535 
1536 		ret = emit_call(addr, fixed_addr, ctx);
1537 		if (ret)
1538 			return ret;
1539 
1540 		if (insn->src_reg != BPF_PSEUDO_CALL)
1541 			emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
1542 		break;
1543 	}
1544 	/* tail call */
1545 	case BPF_JMP | BPF_TAIL_CALL:
1546 		if (emit_bpf_tail_call(i, ctx))
1547 			return -1;
1548 		break;
1549 
1550 	/* function return */
1551 	case BPF_JMP | BPF_EXIT:
1552 		if (i == ctx->prog->len - 1)
1553 			break;
1554 
1555 		rvoff = epilogue_offset(ctx);
1556 		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
1557 		if (ret)
1558 			return ret;
1559 		break;
1560 
1561 	/* dst = imm64 */
1562 	case BPF_LD | BPF_IMM | BPF_DW:
1563 	{
1564 		struct bpf_insn insn1 = insn[1];
1565 		u64 imm64;
1566 
1567 		imm64 = (u64)insn1.imm << 32 | (u32)imm;
1568 		if (bpf_pseudo_func(insn)) {
1569 			/* fixed-length insns for extra jit pass */
1570 			ret = emit_addr(rd, imm64, extra_pass, ctx);
1571 			if (ret)
1572 				return ret;
1573 		} else {
1574 			emit_imm(rd, imm64, ctx);
1575 		}
1576 
1577 		return 1;
1578 	}
1579 
1580 	/* LDX: dst = *(unsigned size *)(src + off) */
1581 	case BPF_LDX | BPF_MEM | BPF_B:
1582 	case BPF_LDX | BPF_MEM | BPF_H:
1583 	case BPF_LDX | BPF_MEM | BPF_W:
1584 	case BPF_LDX | BPF_MEM | BPF_DW:
1585 	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1586 	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1587 	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1588 	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1589 	/* LDSX: dst = *(signed size *)(src + off) */
1590 	case BPF_LDX | BPF_MEMSX | BPF_B:
1591 	case BPF_LDX | BPF_MEMSX | BPF_H:
1592 	case BPF_LDX | BPF_MEMSX | BPF_W:
1593 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1594 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1595 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1596 	/* LDX | PROBE_MEM32: dst = *(unsigned size *)(src + RV_REG_ARENA + off) */
1597 	case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1598 	case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1599 	case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1600 	case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1601 	{
1602 		int insn_len, insns_start;
1603 		bool sign_ext;
1604 
1605 		sign_ext = BPF_MODE(insn->code) == BPF_MEMSX ||
1606 			   BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
1607 
1608 		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
1609 			emit_add(RV_REG_T2, rs, RV_REG_ARENA, ctx);
1610 			rs = RV_REG_T2;
1611 		}
1612 
1613 		switch (BPF_SIZE(code)) {
1614 		case BPF_B:
1615 			if (is_12b_int(off)) {
1616 				insns_start = ctx->ninsns;
1617 				if (sign_ext)
1618 					emit(rv_lb(rd, off, rs), ctx);
1619 				else
1620 					emit(rv_lbu(rd, off, rs), ctx);
1621 				insn_len = ctx->ninsns - insns_start;
1622 				break;
1623 			}
1624 
1625 			emit_imm(RV_REG_T1, off, ctx);
1626 			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
1627 			insns_start = ctx->ninsns;
1628 			if (sign_ext)
1629 				emit(rv_lb(rd, 0, RV_REG_T1), ctx);
1630 			else
1631 				emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
1632 			insn_len = ctx->ninsns - insns_start;
1633 			break;
1634 		case BPF_H:
1635 			if (is_12b_int(off)) {
1636 				insns_start = ctx->ninsns;
1637 				if (sign_ext)
1638 					emit(rv_lh(rd, off, rs), ctx);
1639 				else
1640 					emit(rv_lhu(rd, off, rs), ctx);
1641 				insn_len = ctx->ninsns - insns_start;
1642 				break;
1643 			}
1644 
1645 			emit_imm(RV_REG_T1, off, ctx);
1646 			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
1647 			insns_start = ctx->ninsns;
1648 			if (sign_ext)
1649 				emit(rv_lh(rd, 0, RV_REG_T1), ctx);
1650 			else
1651 				emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
1652 			insn_len = ctx->ninsns - insns_start;
1653 			break;
1654 		case BPF_W:
1655 			if (is_12b_int(off)) {
1656 				insns_start = ctx->ninsns;
1657 				if (sign_ext)
1658 					emit(rv_lw(rd, off, rs), ctx);
1659 				else
1660 					emit(rv_lwu(rd, off, rs), ctx);
1661 				insn_len = ctx->ninsns - insns_start;
1662 				break;
1663 			}
1664 
1665 			emit_imm(RV_REG_T1, off, ctx);
1666 			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
1667 			insns_start = ctx->ninsns;
1668 			if (sign_ext)
1669 				emit(rv_lw(rd, 0, RV_REG_T1), ctx);
1670 			else
1671 				emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
1672 			insn_len = ctx->ninsns - insns_start;
1673 			break;
1674 		case BPF_DW:
1675 			if (is_12b_int(off)) {
1676 				insns_start = ctx->ninsns;
1677 				emit_ld(rd, off, rs, ctx);
1678 				insn_len = ctx->ninsns - insns_start;
1679 				break;
1680 			}
1681 
1682 			emit_imm(RV_REG_T1, off, ctx);
1683 			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
1684 			insns_start = ctx->ninsns;
1685 			emit_ld(rd, 0, RV_REG_T1, ctx);
1686 			insn_len = ctx->ninsns - insns_start;
1687 			break;
1688 		}
1689 
1690 		ret = add_exception_handler(insn, ctx, rd, insn_len);
1691 		if (ret)
1692 			return ret;
1693 
1694 		if (BPF_SIZE(code) != BPF_DW && insn_is_zext(&insn[1]))
1695 			return 1;
1696 		break;
1697 	}
1698 	/* speculation barrier */
1699 	case BPF_ST | BPF_NOSPEC:
1700 		break;
1701 
1702 	/* ST: *(size *)(dst + off) = imm */
1703 	case BPF_ST | BPF_MEM | BPF_B:
1704 		emit_imm(RV_REG_T1, imm, ctx);
1705 		if (is_12b_int(off)) {
1706 			emit(rv_sb(rd, off, RV_REG_T1), ctx);
1707 			break;
1708 		}
1709 
1710 		emit_imm(RV_REG_T2, off, ctx);
1711 		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
1712 		emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
1713 		break;
1714 
1715 	case BPF_ST | BPF_MEM | BPF_H:
1716 		emit_imm(RV_REG_T1, imm, ctx);
1717 		if (is_12b_int(off)) {
1718 			emit(rv_sh(rd, off, RV_REG_T1), ctx);
1719 			break;
1720 		}
1721 
1722 		emit_imm(RV_REG_T2, off, ctx);
1723 		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
1724 		emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
1725 		break;
1726 	case BPF_ST | BPF_MEM | BPF_W:
1727 		emit_imm(RV_REG_T1, imm, ctx);
1728 		if (is_12b_int(off)) {
1729 			emit_sw(rd, off, RV_REG_T1, ctx);
1730 			break;
1731 		}
1732 
1733 		emit_imm(RV_REG_T2, off, ctx);
1734 		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
1735 		emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
1736 		break;
1737 	case BPF_ST | BPF_MEM | BPF_DW:
1738 		emit_imm(RV_REG_T1, imm, ctx);
1739 		if (is_12b_int(off)) {
1740 			emit_sd(rd, off, RV_REG_T1, ctx);
1741 			break;
1742 		}
1743 
1744 		emit_imm(RV_REG_T2, off, ctx);
1745 		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
1746 		emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
1747 		break;
1748 
1749 	case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1750 	case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1751 	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1752 	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1753 	{
1754 		int insn_len, insns_start;
1755 
1756 		emit_add(RV_REG_T3, rd, RV_REG_ARENA, ctx);
1757 		rd = RV_REG_T3;
1758 
1759 		/* Load imm to a register then store it */
1760 		emit_imm(RV_REG_T1, imm, ctx);
1761 
1762 		switch (BPF_SIZE(code)) {
1763 		case BPF_B:
1764 			if (is_12b_int(off)) {
1765 				insns_start = ctx->ninsns;
1766 				emit(rv_sb(rd, off, RV_REG_T1), ctx);
1767 				insn_len = ctx->ninsns - insns_start;
1768 				break;
1769 			}
1770 
1771 			emit_imm(RV_REG_T2, off, ctx);
1772 			emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
1773 			insns_start = ctx->ninsns;
1774 			emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
1775 			insn_len = ctx->ninsns - insns_start;
1776 			break;
1777 		case BPF_H:
1778 			if (is_12b_int(off)) {
1779 				insns_start = ctx->ninsns;
1780 				emit(rv_sh(rd, off, RV_REG_T1), ctx);
1781 				insn_len = ctx->ninsns - insns_start;
1782 				break;
1783 			}
1784 
1785 			emit_imm(RV_REG_T2, off, ctx);
1786 			emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
1787 			insns_start = ctx->ninsns;
1788 			emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
1789 			insn_len = ctx->ninsns - insns_start;
1790 			break;
1791 		case BPF_W:
1792 			if (is_12b_int(off)) {
1793 				insns_start = ctx->ninsns;
1794 				emit_sw(rd, off, RV_REG_T1, ctx);
1795 				insn_len = ctx->ninsns - insns_start;
1796 				break;
1797 			}
1798 
1799 			emit_imm(RV_REG_T2, off, ctx);
1800 			emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
1801 			insns_start = ctx->ninsns;
1802 			emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
1803 			insn_len = ctx->ninsns - insns_start;
1804 			break;
1805 		case BPF_DW:
1806 			if (is_12b_int(off)) {
1807 				insns_start = ctx->ninsns;
1808 				emit_sd(rd, off, RV_REG_T1, ctx);
1809 				insn_len = ctx->ninsns - insns_start;
1810 				break;
1811 			}
1812 
1813 			emit_imm(RV_REG_T2, off, ctx);
1814 			emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
1815 			insns_start = ctx->ninsns;
1816 			emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
1817 			insn_len = ctx->ninsns - insns_start;
1818 			break;
1819 		}
1820 
1821 		ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER,
1822 					    insn_len);
1823 		if (ret)
1824 			return ret;
1825 
1826 		break;
1827 	}
1828 
1829 	/* STX: *(size *)(dst + off) = src */
1830 	case BPF_STX | BPF_MEM | BPF_B:
1831 		if (is_12b_int(off)) {
1832 			emit(rv_sb(rd, off, rs), ctx);
1833 			break;
1834 		}
1835 
1836 		emit_imm(RV_REG_T1, off, ctx);
1837 		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
1838 		emit(rv_sb(RV_REG_T1, 0, rs), ctx);
1839 		break;
1840 	case BPF_STX | BPF_MEM | BPF_H:
1841 		if (is_12b_int(off)) {
1842 			emit(rv_sh(rd, off, rs), ctx);
1843 			break;
1844 		}
1845 
1846 		emit_imm(RV_REG_T1, off, ctx);
1847 		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
1848 		emit(rv_sh(RV_REG_T1, 0, rs), ctx);
1849 		break;
1850 	case BPF_STX | BPF_MEM | BPF_W:
1851 		if (is_12b_int(off)) {
1852 			emit_sw(rd, off, rs, ctx);
1853 			break;
1854 		}
1855 
1856 		emit_imm(RV_REG_T1, off, ctx);
1857 		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
1858 		emit_sw(RV_REG_T1, 0, rs, ctx);
1859 		break;
1860 	case BPF_STX | BPF_MEM | BPF_DW:
1861 		if (is_12b_int(off)) {
1862 			emit_sd(rd, off, rs, ctx);
1863 			break;
1864 		}
1865 
1866 		emit_imm(RV_REG_T1, off, ctx);
1867 		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
1868 		emit_sd(RV_REG_T1, 0, rs, ctx);
1869 		break;
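	/* Atomic RMW, xchg and cmpxchg; emit_atomic() picks the matching
	 * RISC-V AMO (or an LR/SC loop for cmpxchg) based on insn->imm.
	 */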
1870 	case BPF_STX | BPF_ATOMIC | BPF_W:
1871 	case BPF_STX | BPF_ATOMIC | BPF_DW:
1872 		emit_atomic(rd, rs, off, imm,
1873 			    BPF_SIZE(code) == BPF_DW, ctx);
1874 		break;
1875 
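	/* Arena stores with a register source; the address handling
	 * mirrors the BPF_ST | BPF_PROBE_MEM32 cases above.
	 */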
1876 	case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1877 	case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1878 	case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1879 	case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1880 	{
1881 		int insn_len, insns_start;
1882 
1883 		emit_add(RV_REG_T2, rd, RV_REG_ARENA, ctx);
1884 		rd = RV_REG_T2;
1885 
1886 		switch (BPF_SIZE(code)) {
1887 		case BPF_B:
1888 			if (is_12b_int(off)) {
1889 				insns_start = ctx->ninsns;
1890 				emit(rv_sb(rd, off, rs), ctx);
1891 				insn_len = ctx->ninsns - insns_start;
1892 				break;
1893 			}
1894 
1895 			emit_imm(RV_REG_T1, off, ctx);
1896 			emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
1897 			insns_start = ctx->ninsns;
1898 			emit(rv_sb(RV_REG_T1, 0, rs), ctx);
1899 			insn_len = ctx->ninsns - insns_start;
1900 			break;
1901 		case BPF_H:
1902 			if (is_12b_int(off)) {
1903 				insns_start = ctx->ninsns;
1904 				emit(rv_sh(rd, off, rs), ctx);
1905 				insn_len = ctx->ninsns - insns_start;
1906 				break;
1907 			}
1908 
1909 			emit_imm(RV_REG_T1, off, ctx);
1910 			emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
1911 			insns_start = ctx->ninsns;
1912 			emit(rv_sh(RV_REG_T1, 0, rs), ctx);
1913 			insn_len = ctx->ninsns - insns_start;
1914 			break;
1915 		case BPF_W:
1916 			if (is_12b_int(off)) {
1917 				insns_start = ctx->ninsns;
1918 				emit_sw(rd, off, rs, ctx);
1919 				insn_len = ctx->ninsns - insns_start;
1920 				break;
1921 			}
1922 
1923 			emit_imm(RV_REG_T1, off, ctx);
1924 			emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
1925 			insns_start = ctx->ninsns;
1926 			emit_sw(RV_REG_T1, 0, rs, ctx);
1927 			insn_len = ctx->ninsns - insns_start;
1928 			break;
1929 		case BPF_DW:
1930 			if (is_12b_int(off)) {
1931 				insns_start = ctx->ninsns;
1932 				emit_sd(rd, off, rs, ctx);
1933 				insn_len = ctx->ninsns - insns_start;
1934 				break;
1935 			}
1936 
1937 			emit_imm(RV_REG_T1, off, ctx);
1938 			emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
1939 			insns_start = ctx->ninsns;
1940 			emit_sd(RV_REG_T1, 0, rs, ctx);
1941 			insn_len = ctx->ninsns - insns_start;
1942 			break;
1943 		}
1944 
1945 		ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER,
1946 					    insn_len);
1947 		if (ret)
1948 			return ret;
1949 
1950 		break;
1951 	}
1952 
1953 	default:
1954 		pr_err("bpf-jit: unknown opcode %02x\n", code);
1955 		return -EINVAL;
1956 	}
1957 
1958 	return 0;
1959 }
1960 
1961 void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
1962 {
1963 	int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
1964 
1965 	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, STACK_ALIGN);
1966 	if (bpf_stack_adjust)
1967 		mark_fp(ctx);
1968 
1969 	if (seen_reg(RV_REG_RA, ctx))
1970 		stack_adjust += 8;
1971 	stack_adjust += 8; /* RV_REG_FP */
1972 	if (seen_reg(RV_REG_S1, ctx))
1973 		stack_adjust += 8;
1974 	if (seen_reg(RV_REG_S2, ctx))
1975 		stack_adjust += 8;
1976 	if (seen_reg(RV_REG_S3, ctx))
1977 		stack_adjust += 8;
1978 	if (seen_reg(RV_REG_S4, ctx))
1979 		stack_adjust += 8;
1980 	if (seen_reg(RV_REG_S5, ctx))
1981 		stack_adjust += 8;
1982 	if (seen_reg(RV_REG_S6, ctx))
1983 		stack_adjust += 8;
1984 	if (ctx->arena_vm_start)
1985 		stack_adjust += 8;
1986 
1987 	stack_adjust = round_up(stack_adjust, STACK_ALIGN);
1988 	stack_adjust += bpf_stack_adjust;
1989 
1990 	store_offset = stack_adjust - 8;
1991 
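	/*
	 * Illustrative frame layout (slots other than fp are present
	 * only when the corresponding register is actually used):
	 *
	 *  high  +--------------+ <- FP = SP + stack_adjust
	 *        | ra           |
	 *        | fp           |
	 *        | s1 .. s6     |
	 *        | s7 (arena)   |
	 *        +--------------+ <- S5 (BPF frame ptr) = SP + bpf_stack_adjust
	 *        | BPF stack    |
	 *  low   +--------------+ <- SP
	 */
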
1992 	/* emit kcfi type preamble immediately before the first insn */
1993 	emit_kcfi(is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash, ctx);
1994 
1995 	/* nops reserved for auipc+jalr pair (patched at attach time) */
1996 	for (i = 0; i < RV_FENTRY_NINSNS; i++)
1997 		emit(rv_nop(), ctx);
1998 
1999 	/* The first instruction always initializes the tail-call counter
2000 	 * (TCC) register. Tail calls jump past it, so force a 4-byte
2001 	 * (non-compressed) encoding to keep that skip offset fixed.
2002 	 */
2003 	emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
2004 
2005 	emit_addi(RV_REG_SP, RV_REG_SP, -stack_adjust, ctx);
2006 
2007 	if (seen_reg(RV_REG_RA, ctx)) {
2008 		emit_sd(RV_REG_SP, store_offset, RV_REG_RA, ctx);
2009 		store_offset -= 8;
2010 	}
2011 	emit_sd(RV_REG_SP, store_offset, RV_REG_FP, ctx);
2012 	store_offset -= 8;
2013 	if (seen_reg(RV_REG_S1, ctx)) {
2014 		emit_sd(RV_REG_SP, store_offset, RV_REG_S1, ctx);
2015 		store_offset -= 8;
2016 	}
2017 	if (seen_reg(RV_REG_S2, ctx)) {
2018 		emit_sd(RV_REG_SP, store_offset, RV_REG_S2, ctx);
2019 		store_offset -= 8;
2020 	}
2021 	if (seen_reg(RV_REG_S3, ctx)) {
2022 		emit_sd(RV_REG_SP, store_offset, RV_REG_S3, ctx);
2023 		store_offset -= 8;
2024 	}
2025 	if (seen_reg(RV_REG_S4, ctx)) {
2026 		emit_sd(RV_REG_SP, store_offset, RV_REG_S4, ctx);
2027 		store_offset -= 8;
2028 	}
2029 	if (seen_reg(RV_REG_S5, ctx)) {
2030 		emit_sd(RV_REG_SP, store_offset, RV_REG_S5, ctx);
2031 		store_offset -= 8;
2032 	}
2033 	if (seen_reg(RV_REG_S6, ctx)) {
2034 		emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
2035 		store_offset -= 8;
2036 	}
2037 	if (ctx->arena_vm_start) {
2038 		emit_sd(RV_REG_SP, store_offset, RV_REG_ARENA, ctx);
2039 		store_offset -= 8;
2040 	}
2041 
2042 	emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);
2043 
2044 	if (bpf_stack_adjust)
2045 		emit_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust, ctx);
2046 
2047 	/* The program contains both calls and tail calls, so the
2048 	 * caller-saved RV_REG_TCC needs to be preserved across calls;
2049 	 * keep a copy in the callee-saved RV_REG_TCC_SAVED.
2050 	 */
2050 	if (seen_tail_call(ctx) && seen_call(ctx))
2051 		emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);
2052 
2053 	ctx->stack_size = stack_adjust;
2054 
2055 	if (ctx->arena_vm_start)
2056 		emit_imm(RV_REG_ARENA, ctx->arena_vm_start, ctx);
2057 }
2058 
2059 void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
2060 {
2061 	__build_epilogue(false, ctx);
2062 }
2063 
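/*
 * Capability hooks: the core BPF code queries these to learn which
 * optional features this JIT implements.
 */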
2064 bool bpf_jit_supports_kfunc_call(void)
2065 {
2066 	return true;
2067 }
2068 
2069 bool bpf_jit_supports_ptr_xchg(void)
2070 {
2071 	return true;
2072 }
2073 
2074 bool bpf_jit_supports_arena(void)
2075 {
2076 	return true;
2077 }
2078 
2079 bool bpf_jit_supports_percpu_insn(void)
2080 {
2081 	return true;
2082 }
2083 
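/*
 * Helpers reported as inlined here are emitted as a short instruction
 * sequence by the JIT rather than as a call to the helper; currently
 * only bpf_get_smp_processor_id() is handled this way.
 */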
2084 bool bpf_jit_inlines_helper_call(s32 imm)
2085 {
2086 	switch (imm) {
2087 	case BPF_FUNC_get_smp_processor_id:
2088 		return true;
2089 	default:
2090 		return false;
2091 	}
2092 }
2093