// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/bpf.h>

#include <asm/kprobes.h>
#include <asm/code-patching.h>

#include "bpf_jit.h"

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}

int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
{
	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
		PPC_JMP(exit_addr);
	} else if (ctx->alt_exit_addr) {
		if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
			return -1;
		PPC_JMP(ctx->alt_exit_addr);
	} else {
		ctx->alt_exit_addr = ctx->idx * 4;
		bpf_jit_build_epilogue(image, ctx);
	}

	return 0;
}

struct powerpc_jit_data {
	/* address of rw header */
	struct bpf_binary_header *hdr;
	/* address of ro final header */
	struct bpf_binary_header *fhdr;
	u32 *addrs;
	u8 *fimage;
	u32 proglen;
	struct codegen_context ctx;
};

bool bpf_jit_needs_zext(void)
{
	return true;
}
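
/*
 * Rough sketch (a reading aid, not authoritative) of the image laid out
 * by bpf_int_jit_compile() below, as implied by the alloclen computation
 * and the fp->aux->extable assignment:
 *
 *	+---------------------------+ <- fimage (== fp->bpf_func)
 *	| function descriptor       |    FUNCTION_DESCR_SIZE bytes
 *	| (address + TOC)           |    (ELFv1 only, 0 elsewhere)
 *	+---------------------------+ <- fcode_base (first instruction)
 *	| prologue / body /         |    proglen == cgctx.idx * 4
 *	| epilogue instructions     |
 *	+---------------------------+
 *	| exception fixup stubs     |    num_exentries * BPF_FIXUP_LEN * 4
 *	+---------------------------+ <- fp->aux->extable
 *	| exception table entries   |    num_exentries * sizeof(entry)
 *	+---------------------------+
 *
 * The same layout exists twice: writable (hdr/image/code_base) during
 * generation, and read-only (fhdr/fimage/fcode_base) once finalized.
 */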
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *fhdr = NULL;
	struct bpf_binary_header *hdr = NULL;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;
	u8 *fimage = NULL;
	u32 *fcode_base;
	u32 extable_len;
	u32 fixup_len;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		/*
		 * JIT compiled to a writable location (image/code_base) first.
		 * It is then moved to the readonly final location (fimage/fcode_base)
		 * using instruction patching.
		 */
		fimage = jit_data->fimage;
		fhdr = jit_data->fhdr;
		proglen = jit_data->proglen;
		hdr = jit_data->hdr;
		image = (void *)hdr + ((void *)fimage - (void *)fhdr);
		extra_pass = true;
		/* During extra pass, ensure index is reset before repopulating extable entries */
		cgctx.exentry_idx = 0;
		goto skip_init_ctx;
	}

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));
	bpf_jit_init_reg_mapping(&cgctx);

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * If we have seen a tail call, we need a second pass.
	 * This is because bpf_jit_emit_common_epilogue() is called
	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
	 * We also need a second pass if we ended up with too large
	 * a program so as to ensure BPF_EXIT branches are in range.
	 */
	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
		cgctx.idx = 0;
		if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
			fp = org_fp;
			goto out_addrs;
		}
	}

	bpf_jit_realloc_regs(&cgctx);
	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(NULL, &cgctx);
	addrs[fp->len] = cgctx.idx * 4;
	bpf_jit_build_epilogue(NULL, &cgctx);

	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;

	fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
					 bpf_jit_fill_ill_insns);
	if (!fhdr) {
		fp = org_fp;
		goto out_addrs;
	}

	if (extable_len)
		fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
	fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		cgctx.alt_exit_addr = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
				       extra_pass)) {
			bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
			bpf_jit_binary_pack_free(fhdr, hdr);
			fp = org_fp;
			goto out_addrs;
		}
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef CONFIG_PPC64_ELF_ABI_V1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)fcode_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)fimage;
	fp->jited = 1;
	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;

	if (!fp->is_func || extra_pass) {
		if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) {
			fp = org_fp;
			goto out_addrs;
		}
		bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->fimage = fimage;
		jit_data->fhdr = fhdr;
		jit_data->hdr = hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}
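
/*
 * Summary of the pass structure above (a reading aid, not a spec):
 *
 *   pass 0   - "scouting" run with a NULL image; only sizes the program,
 *              fills addrs[] and accumulates cgctx.seen. It is re-run once
 *              if a tail call was seen or the program outgrew the direct
 *              branch range, since either invalidates earlier assumptions.
 *   pass 1-2 - real code generation into the writable image (code_base),
 *              using addresses in the final read-only image (fcode_base)
 *              for anything address-dependent.
 *   extra    - for subprograms (fp->is_func), the compiler is re-entered
 *              with jit_data preserved once callee addresses are known,
 *              taking the skip_init_ctx path.
 */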
/*
 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
 * this function, as this only applies to BPF_PROBE_MEM, for now.
 */
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
			  struct codegen_context *ctx, int insn_idx, int jmp_off,
			  int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex, *ex_entry;
	u32 *fixup;

	/* Populate extable entries only in the last pass */
	if (pass != 2)
		return 0;

	if (!fp->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
		return -EINVAL;

	/*
	 * Program is first written to image before copying to the
	 * final location (fimage). Accordingly, update in the image first.
	 * As all offsets used are relative, copying as is to the
	 * final location should be alright.
	 */
	pc = (unsigned long)&image[insn_idx];
	ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;

	fixup = (void *)ex -
		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);

	fixup[0] = PPC_RAW_LI(dst_reg, 0);
	if (IS_ENABLED(CONFIG_PPC32))
		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */

	fixup[BPF_FIXUP_LEN - 1] =
		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);

	ex_entry = &ex[ctx->exentry_idx];

	offset = pc - (long)&ex_entry->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex_entry->insn = offset;

	offset = (long)fixup - (long)&ex_entry->fixup;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex_entry->fixup = offset;

	ctx->exentry_idx++;
	return 0;
}
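
/*
 * Worked example of the fixup addressing above, with illustrative numbers
 * (assuming BPF_FIXUP_LEN == 2, i.e. the 64-bit case of one load-immediate
 * plus the branch, and num_exentries == 4): the fixup area ends where the
 * extable begins, so for exentry_idx == 1:
 *
 *	fixup = (void *)ex - 4 * 2 * 4 + 1 * 2 * 4 = (void *)ex - 24
 *
 * i.e. the second two-instruction stub counted forward from 32 bytes
 * before the extable. Each stub loads 0 into the destination register(s)
 * and branches back to the instruction following the faulting load
 * (pc + jmp_off), which is how a faulting BPF_PROBE_MEM load yields 0
 * instead of an oops.
 */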
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	int err;

	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&text_mutex);
	err = patch_instructions(dst, src, len, false);
	mutex_unlock(&text_mutex);

	return err ? ERR_PTR(err) : dst;
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	u32 insn = BREAKPOINT_INSTRUCTION;
	int ret;

	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
		return -EINVAL;

	mutex_lock(&text_mutex);
	ret = patch_instructions(dst, &insn, len, true);
	mutex_unlock(&text_mutex);

	return ret;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct powerpc_jit_data *jit_data = fp->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr);
			kvfree(jit_data->addrs);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(fp);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

bool bpf_jit_supports_far_kfunc_call(void)
{
	return IS_ENABLED(CONFIG_PPC64);
}
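
/*
 * Closing note (orientation only): the functions in this file are arch
 * hooks consumed by the generic BPF core, not called by BPF programs
 * themselves. Roughly, bpf_int_jit_compile() is reached via
 * bpf_prog_select_runtime(), bpf_arch_text_copy() and
 * bpf_arch_text_invalidate() back the shared bpf_prog_pack allocator's
 * read-only images, and bpf_jit_free() runs on the program teardown path.
 */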