// SPDX-License-Identifier: GPL-2.0
/* BPF JIT compiler for RV64G
 *
 * Copyright(c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 *
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include "bpf_jit.h"

#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if the program does calls */

static const int regmap[] = {
	[BPF_REG_0] = RV_REG_A5,
	[BPF_REG_1] = RV_REG_A0,
	[BPF_REG_2] = RV_REG_A1,
	[BPF_REG_3] = RV_REG_A2,
	[BPF_REG_4] = RV_REG_A3,
	[BPF_REG_5] = RV_REG_A4,
	[BPF_REG_6] = RV_REG_S1,
	[BPF_REG_7] = RV_REG_S2,
	[BPF_REG_8] = RV_REG_S3,
	[BPF_REG_9] = RV_REG_S4,
	[BPF_REG_FP] = RV_REG_S5,
	[BPF_REG_AX] = RV_REG_T0,
};

enum {
	RV_CTX_F_SEEN_TAIL_CALL = 0,
	RV_CTX_F_SEEN_CALL = RV_REG_RA,
	RV_CTX_F_SEEN_S1 = RV_REG_S1,
	RV_CTX_F_SEEN_S2 = RV_REG_S2,
	RV_CTX_F_SEEN_S3 = RV_REG_S3,
	RV_CTX_F_SEEN_S4 = RV_REG_S4,
	RV_CTX_F_SEEN_S5 = RV_REG_S5,
	RV_CTX_F_SEEN_S6 = RV_REG_S6,
};

static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
{
	u8 reg = regmap[bpf_reg];

	switch (reg) {
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		__set_bit(reg, &ctx->flags);
	}
	return reg;
}

static bool seen_reg(int reg, struct rv_jit_context *ctx)
{
	switch (reg) {
	case RV_CTX_F_SEEN_CALL:
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		return test_bit(reg, &ctx->flags);
	}
	return false;
}

static void mark_fp(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_S5, &ctx->flags);
}

static void mark_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static bool seen_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static void mark_tail_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static bool seen_tail_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
{
	mark_tail_call(ctx);

	if (seen_call(ctx)) {
		__set_bit(RV_CTX_F_SEEN_S6, &ctx->flags);
		return RV_REG_S6;
	}
	return RV_REG_A6;
}

static bool is_32b_int(s64 val)
{
	return -(1L << 31) <= val && val < (1L << 31);
}

static bool in_auipc_jalr_range(s64 val)
{
	/*
	 * auipc+jalr can reach any signed PC-relative offset in the range
	 * [-2^31 - 2^11, 2^31 - 2^11).
	 */
	return (-(1L << 31) - (1L << 11)) <= val &&
	       val < ((1L << 31) - (1L << 11));
}

static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
{
	/* Note that the immediate from the add is sign-extended,
	 * which means that we need to compensate this by adding 2^12,
	 * when the 12th bit is set. A simpler way of doing this, and
	 * getting rid of the check, is to just add 2^11 before the
	 * shift. The "Loading a 32-Bit constant" example from the
	 * "Computer Organization and Design, RISC-V edition" book by
	 * Patterson/Hennessy highlights this fact.
	 *
	 * This also means that we need to process LSB to MSB.
	 */
	s64 upper = (val + (1 << 11)) >> 12, lower = val & 0xfff;
	int shift;

	if (is_32b_int(val)) {
		if (upper)
			emit(rv_lui(rd, upper), ctx);

		if (!upper) {
			emit(rv_addi(rd, RV_REG_ZERO, lower), ctx);
			return;
		}

		emit(rv_addiw(rd, rd, lower), ctx);
		return;
	}

	shift = __ffs(upper);
	upper >>= shift;
	shift += 12;

	emit_imm(rd, upper, ctx);

	emit(rv_slli(rd, rd, shift), ctx);
	if (lower)
		emit(rv_addi(rd, rd, lower), ctx);
}
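
/* Illustrative emit_imm() decompositions (values chosen for this comment,
 * not taken from a real program):
 *
 *   val = 0x12345fff (32-bit case):
 *     lower = 0xfff, which sign-extends to -1 in the addiw, so upper is
 *     rounded up: upper = (0x12345fff + 0x800) >> 12 = 0x12346.
 *       lui   rd, 0x12346   ; rd = 0x12346000
 *       addiw rd, rd, -1    ; rd = 0x12345fff
 *
 *   val = 0xff00000000 (64-bit case, recursion plus shift):
 *     upper = 0xff00000, shift = __ffs(upper) + 12 = 32, lower = 0.
 *       addi  rd, zero, 255 ; emit_imm(rd, 0xff)
 *       slli  rd, rd, 32    ; rd = 0xff00000000
 */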

static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
	int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;

	if (seen_reg(RV_REG_RA, ctx)) {
		emit(rv_ld(RV_REG_RA, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	emit(rv_ld(RV_REG_FP, store_offset, RV_REG_SP), ctx);
	store_offset -= 8;
	if (seen_reg(RV_REG_S1, ctx)) {
		emit(rv_ld(RV_REG_S1, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S2, ctx)) {
		emit(rv_ld(RV_REG_S2, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S3, ctx)) {
		emit(rv_ld(RV_REG_S3, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S4, ctx)) {
		emit(rv_ld(RV_REG_S4, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S5, ctx)) {
		emit(rv_ld(RV_REG_S5, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S6, ctx)) {
		emit(rv_ld(RV_REG_S6, store_offset, RV_REG_SP), ctx);
		store_offset -= 8;
	}

	emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
	/* Set return value. */
	if (!is_tail_call)
		emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx);
	emit(rv_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
		     is_tail_call ? 4 : 0), /* skip TCC init */
	     ctx);
}

static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
		     struct rv_jit_context *ctx)
{
	switch (cond) {
	case BPF_JEQ:
		emit(rv_beq(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGT:
		emit(rv_bltu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JLT:
		emit(rv_bltu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGE:
		emit(rv_bgeu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JLE:
		emit(rv_bgeu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JNE:
		emit(rv_bne(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGT:
		emit(rv_blt(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JSLT:
		emit(rv_blt(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGE:
		emit(rv_bge(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSLE:
		emit(rv_bge(rs, rd, rvoff >> 1), ctx);
	}
}

static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
			struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (is_13b_int(rvoff)) {
		emit_bcc(cond, rd, rs, rvoff, ctx);
		return;
	}

	/* Adjust for jal */
	rvoff -= 4;

	/* Transform, e.g.:
	 * bne rd,rs,foo
	 * to
	 * beq rd,rs,<.L1>
	 * (auipc foo)
	 * jal(r) foo
	 * .L1
	 */
	cond = invert_bpf_cond(cond);
	if (is_21b_int(rvoff)) {
		emit_bcc(cond, rd, rs, 8, ctx);
		emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx);
		return;
	}

	/* 32b. No need for an additional rvoff adjustment, since we
	 * get that from the auipc at PC', where PC = PC' + 4.
	 */
	upper = (rvoff + (1 << 11)) >> 12;
	lower = rvoff & 0xfff;

	emit_bcc(cond, rd, rs, 12, ctx);
	emit(rv_auipc(RV_REG_T1, upper), ctx);
	emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx);
}
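
/* A note on the offsets used above (derived from the fact that every
 * instruction emitted here is 4 bytes):
 *
 * - The branch and jal encoders take their immediate in 2-byte units,
 *   hence the "rvoff >> 1" in emit_bcc() and for rv_jal().
 * - In the medium-range case the inverted branch skips itself plus the
 *   jal, i.e. 8 bytes; in the far case it skips itself, the auipc and
 *   the jalr, i.e. 12 bytes.
 * - "rvoff -= 4" accounts for the jal/auipc being emitted one
 *   instruction after the point that rvoff was measured from.
 */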

static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
{
	emit(rv_slli(reg, reg, 32), ctx);
	emit(rv_srli(reg, reg, 32), ctx);
}

static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
	int tc_ninsn, off, start_insn = ctx->ninsns;
	u8 tcc = rv_tail_call_reg(ctx);

	/* a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
		   ctx->offset[0];
	emit_zext_32(RV_REG_A2, ctx);

	off = offsetof(struct bpf_array, map.max_entries);
	if (is_12b_check(off, insn))
		return -1;
	emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
	emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx);

	/* if (TCC-- < 0)
	 *	goto out;
	 */
	emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
	emit_branch(BPF_JSLT, tcc, RV_REG_ZERO, off, ctx);

	/* prog = array->ptrs[index];
	 * if (!prog)
	 *	goto out;
	 */
	emit(rv_slli(RV_REG_T2, RV_REG_A2, 3), ctx);
	emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_A1), ctx);
	off = offsetof(struct bpf_array, ptrs);
	if (is_12b_check(off, insn))
		return -1;
	emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx);
	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
	emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	if (is_12b_check(off, insn))
		return -1;
	emit(rv_ld(RV_REG_T3, off, RV_REG_T2), ctx);
	emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx);
	__build_epilogue(true, ctx);
	return 0;
}
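
/* How the tail-call counter (TCC) pieces fit together (a summary of the
 * code above and of the prologue/epilogue further down):
 *
 * - The prologue's first instruction loads MAX_TAIL_CALL_CNT into a6
 *   (RV_REG_TCC).
 * - A tail call bails out once the counter has gone negative (the
 *   "TCC-- < 0" check above); otherwise it moves the decremented value
 *   back into RV_REG_TCC and jumps to the target's bpf_func + 4, i.e.
 *   past the target's TCC setup, so the count survives across the
 *   whole chain of tail calls.
 * - When the program also performs regular calls, the live counter is
 *   kept in the callee-saved s6 (RV_REG_TCC_SAVED) instead; see
 *   rv_tail_call_reg() and the prologue.
 */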

static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
		      struct rv_jit_context *ctx)
{
	u8 code = insn->code;

	switch (code) {
	case BPF_JMP | BPF_JA:
	case BPF_JMP | BPF_CALL:
	case BPF_JMP | BPF_EXIT:
	case BPF_JMP | BPF_TAIL_CALL:
		break;
	default:
		*rd = bpf_to_rv_reg(insn->dst_reg, ctx);
	}

	if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
	    code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
	    code & BPF_LDX || code & BPF_STX)
		*rs = bpf_to_rv_reg(insn->src_reg, ctx);
}

static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
	emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
	emit_zext_32(RV_REG_T2, ctx);
	emit(rv_addi(RV_REG_T1, *rs, 0), ctx);
	emit_zext_32(RV_REG_T1, ctx);
	*rd = RV_REG_T2;
	*rs = RV_REG_T1;
}

static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
	emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
	emit(rv_addiw(RV_REG_T1, *rs, 0), ctx);
	*rd = RV_REG_T2;
	*rs = RV_REG_T1;
}

static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
{
	emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
	emit_zext_32(RV_REG_T2, ctx);
	emit_zext_32(RV_REG_T1, ctx);
	*rd = RV_REG_T2;
}

static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
{
	emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
	*rd = RV_REG_T2;
}

static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
			      struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (rvoff && is_21b_int(rvoff) && !force_jalr) {
		emit(rv_jal(rd, rvoff >> 1), ctx);
		return 0;
	} else if (in_auipc_jalr_range(rvoff)) {
		upper = (rvoff + (1 << 11)) >> 12;
		lower = rvoff & 0xfff;
		emit(rv_auipc(RV_REG_T1, upper), ctx);
		emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
		return 0;
	}

	pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff);
	return -ERANGE;
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}

static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
{
	s64 off = 0;
	u64 ip;
	u8 rd;
	int ret;

	if (addr && ctx->insns) {
		ip = (u64)(long)(ctx->insns + ctx->ninsns);
		off = addr - ip;
	}

	ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
	if (ret)
		return ret;
	rd = bpf_to_rv_reg(BPF_REG_0, ctx);
	emit(rv_addi(rd, RV_REG_A0, 0), ctx);
	return 0;
}
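
/* Sketch of what emit_call() produces for a helper call ("helper" is a
 * placeholder name, offsets are illustrative):
 *
 *	auipc	t1, %hi(helper - .)
 *	jalr	ra, %lo(helper - .)(t1)
 *	mv	a5, a0			; BPF_REG_0 lives in a5
 *
 * During the first pass ctx->insns is NULL, so off stays 0 and the
 * auipc+jalr form is used. For non-fixed (relocatable) targets force_jalr
 * is set as well, presumably so the sequence keeps the same size when the
 * real address is filled in on the extra pass.
 */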

int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
		      bool extra_pass)
{
	bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
		    BPF_CLASS(insn->code) == BPF_JMP;
	int s, e, rvoff, ret, i = insn - ctx->prog->insnsi;
	struct bpf_prog_aux *aux = ctx->prog->aux;
	u8 rd = -1, rs = -1, code = insn->code;
	s16 off = insn->off;
	s32 imm = insn->imm;

	init_regs(&rd, &rs, insn, ctx);

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		if (imm == 1) {
			/* Special mov32 for zext */
			emit_zext_32(rd, ctx);
			break;
		}
		emit(is64 ? rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(rv_and(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(rv_or(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(rv_xor(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
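
	/* BPF_DIV and BPF_MOD are unsigned, hence divu/remu(w) above. No
	 * explicit divide-by-zero guard is emitted here; RISC-V integer
	 * division does not trap (a zero divisor yields an all-ones
	 * quotient and leaves the dividend as the remainder), and the
	 * zero-divisor case is expected to have been dealt with before
	 * the program reaches the JIT.
	 */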

	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) :
		     rv_subw(rd, RV_REG_ZERO, rd), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			emit(rv_slli(rd, rd, 48), ctx);
			emit(rv_srli(rd, rd, 48), ctx);
			break;
		case 32:
			if (!aux->verifier_zext)
				emit_zext_32(rd, ctx);
			break;
		case 64:
			/* Do nothing */
			break;
		}
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
		emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);
		if (imm == 16)
			goto out_be;

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);
		if (imm == 32)
			goto out_be;

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);

		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
		emit(rv_srli(rd, rd, 8), ctx);
out_be:
		emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);

		emit(rv_addi(rd, RV_REG_T2, 0), ctx);
		break;
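
	/* The BPF_FROM_BE case above is an open-coded byte swap: plain
	 * RV64G has no byte-reverse instruction, so the low byte is
	 * peeled off and accumulated into t2 one step at a time, with
	 * early exits for the 16- and 32-bit widths. As a side effect
	 * the result is already zero-extended, so no extra zext is
	 * needed afterwards.
	 */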

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_imm(rd, imm, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_12b_int(imm)) {
			emit(is64 ? rv_addi(rd, rd, imm) :
			     rv_addiw(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(is64 ? rv_add(rd, rd, RV_REG_T1) :
			     rv_addw(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_12b_int(-imm)) {
			emit(is64 ? rv_addi(rd, rd, -imm) :
			     rv_addiw(rd, rd, -imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(is64 ? rv_sub(rd, rd, RV_REG_T1) :
			     rv_subw(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_andi(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(rv_and(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_ori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(rv_or(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_xori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(rv_xor(rd, rd, RV_REG_T1), ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
		     rv_mulw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
		     rv_divuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
		     rv_remuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		rvoff = rv_offset(i, off, ctx);
		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
		if (ret)
			return ret;
		break;

	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		rvoff = rv_offset(i, off, ctx);
		if (!is64) {
			s = ctx->ninsns;
			if (is_signed_bpf_cond(BPF_OP(code)))
				emit_sext_32_rd_rs(&rd, &rs, ctx);
			else
				emit_zext_32_rd_rs(&rd, &rs, ctx);
			e = ctx->ninsns;

			/* Adjust for extra insns */
			rvoff -= (e - s) << 2;
		}

		if (BPF_OP(code) == BPF_JSET) {
			/* Adjust for and */
			rvoff -= 4;
			emit(rv_and(RV_REG_T1, rd, rs), ctx);
			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
				    ctx);
		} else {
			emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		}
		break;
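
	/* A note on the rvoff bookkeeping in the conditional jumps:
	 * rv_offset() measures from the first instruction emitted for
	 * this BPF instruction to the first instruction of the target.
	 * Anything emitted before the actual branch (the JMP32 sign/zero
	 * extensions into t1/t2, or the "and" for BPF_JSET) shortens the
	 * remaining distance, hence the "rvoff -= ..." adjustments. The
	 * extensions also work on copies in t1/t2, so a 32-bit compare
	 * leaves the BPF registers themselves untouched.
	 */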

	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		if (imm) {
			emit_imm(RV_REG_T1, imm, ctx);
			rs = RV_REG_T1;
		} else {
			/* If imm is 0, simply use zero register. */
			rs = RV_REG_ZERO;
		}
		if (!is64) {
			if (is_signed_bpf_cond(BPF_OP(code)))
				emit_sext_32_rd(&rd, ctx);
			else
				emit_zext_32_rd_t1(&rd, ctx);
		}
		e = ctx->ninsns;

		/* Adjust for extra insns */
		rvoff -= (e - s) << 2;
		emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		break;

	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		if (is_12b_int(imm)) {
			emit(rv_andi(RV_REG_T1, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
		}
		/* For jset32, we should clear the upper 32 bits of t1, but
		 * sign-extension is sufficient here and saves one instruction,
		 * as t1 is used only in comparison against zero.
		 */
		if (!is64 && imm < 0)
			emit(rv_addiw(RV_REG_T1, RV_REG_T1, 0), ctx);
		e = ctx->ninsns;
		rvoff -= (e - s) << 2;
		emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		bool fixed;
		u64 addr;

		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
					    &fixed);
		if (ret < 0)
			return ret;
		ret = emit_call(fixed, addr, ctx);
		if (ret)
			return ret;
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(i, ctx))
			return -1;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		if (i == ctx->prog->len - 1)
			break;

		rvoff = epilogue_offset(ctx);
		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
		if (ret)
			return ret;
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_imm(rd, imm64, ctx);
		return 1;
	}
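
	/* BPF_LD | BPF_IMM | BPF_DW occupies two 8-byte BPF instructions
	 * (the second one only carries the upper 32 bits of the
	 * immediate), so "return 1" above lets the caller skip the next
	 * instruction. The same convention is used below when a
	 * zero-extending load is followed by a verifier-inserted zext
	 * pseudo-instruction (insn_is_zext()), which the load already
	 * subsumes.
	 */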

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
		if (is_12b_int(off)) {
			emit(rv_lbu(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
		emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_LDX | BPF_MEM | BPF_H:
		if (is_12b_int(off)) {
			emit(rv_lhu(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
		emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_LDX | BPF_MEM | BPF_W:
		if (is_12b_int(off)) {
			emit(rv_lwu(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
		emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_LDX | BPF_MEM | BPF_DW:
		if (is_12b_int(off)) {
			emit(rv_ld(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
		emit(rv_ld(rd, 0, RV_REG_T1), ctx);
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sb(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
		emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;

	case BPF_ST | BPF_MEM | BPF_H:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sh(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
		emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;
	case BPF_ST | BPF_MEM | BPF_W:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sw(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
		emit(rv_sw(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;
	case BPF_ST | BPF_MEM | BPF_DW:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sd(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
		emit(rv_sd(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
		if (is_12b_int(off)) {
			emit(rv_sb(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
		emit(rv_sb(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_H:
		if (is_12b_int(off)) {
			emit(rv_sh(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
		emit(rv_sh(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_W:
		if (is_12b_int(off)) {
			emit(rv_sw(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
		emit(rv_sw(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_DW:
		if (is_12b_int(off)) {
			emit(rv_sd(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
		emit(rv_sd(RV_REG_T1, 0, rs), ctx);
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		if (off) {
			if (is_12b_int(off)) {
				emit(rv_addi(RV_REG_T1, rd, off), ctx);
			} else {
				emit_imm(RV_REG_T1, off, ctx);
				emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
			}

			rd = RV_REG_T1;
		}

		emit(BPF_SIZE(code) == BPF_W ?
		     rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	default:
		pr_err("bpf-jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}
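
/* Frame layout produced by bpf_jit_build_prologue() below, from high to
 * low addresses (only registers that were actually used get a save slot,
 * so real frames are often smaller):
 *
 *   old sp / fp:     saved ra (only if the program makes calls),
 *                    saved fp, saved s1..s6 as needed,
 *                    padding up to a 16-byte multiple
 *   s5 (BPF_REG_FP): top of the BPF stack area,
 *                    stack_depth rounded up to 16 bytes
 *   new sp:          bottom of the BPF stack area
 */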

void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
	if (bpf_stack_adjust)
		mark_fp(ctx);

	if (seen_reg(RV_REG_RA, ctx))
		stack_adjust += 8;
	stack_adjust += 8; /* RV_REG_FP */
	if (seen_reg(RV_REG_S1, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S2, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S3, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S4, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S5, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S6, ctx))
		stack_adjust += 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;

	store_offset = stack_adjust - 8;

	/* The first instruction always sets the tail-call-counter
	 * (TCC) register. This instruction is skipped by tail calls.
	 */
	emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);

	emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);

	if (seen_reg(RV_REG_RA, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_RA), ctx);
		store_offset -= 8;
	}
	emit(rv_sd(RV_REG_SP, store_offset, RV_REG_FP), ctx);
	store_offset -= 8;
	if (seen_reg(RV_REG_S1, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S1), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S2, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S2), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S3, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S3), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S4, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S4), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S5, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S5), ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S6, ctx)) {
		emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S6), ctx);
		store_offset -= 8;
	}

	emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);

	if (bpf_stack_adjust)
		emit(rv_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust), ctx);

	/* The program contains calls and tail calls, so RV_REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx);

	ctx->stack_size = stack_adjust;
}

void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
{
	__build_epilogue(false, ctx);
}

void *bpf_jit_alloc_exec(unsigned long size)
{
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

void bpf_jit_free_exec(void *addr)
{
	return vfree(addr);
}