// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}
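
/* Rough sketch of what the register/modify/unregister_fentry() helpers do
 * at the target's call site (illustrative only; the exact instruction
 * sequence is arch-specific):
 *
 *	<target_func>:
 *		nop5 / call __fentry__   ->   call <trampoline image>
 *		...function body...           ...function body...
 *
 * When the site is ftrace-managed, the rewrite goes through the
 * *_ftrace_direct() API; otherwise bpf_arch_text_poke() patches the
 * instruction directly with BPF_MOD_CALL.
 */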
static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= aux->prog->call_get_func_ip;
			*progs++ = aux->prog;
		}
	}
	return tprogs;
}

static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(1);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}
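
/* Rough ordering of the image teardown, as read from bpf_tramp_image_put()
 * below; the step numbers match the callback comments:
 *
 *	fexit/fmod_ret image, CONFIG_PREEMPTION=y:
 *		poke jmp over the fexit calls
 *		-> call_rcu_tasks()	step 1: __bpf_tramp_image_put_rcu_tasks
 *		-> percpu_ref_kill()	step 2: __bpf_tramp_image_release
 *		-> call_rcu_tasks()	step 3: __bpf_tramp_image_put_rcu
 *		-> schedule_work()	__bpf_tramp_image_put_deferred frees it
 *	(with CONFIG_PREEMPTION=n the first call_rcu_tasks() is skipped and
 *	 percpu_ref_kill() is called directly)
 *
 *	fentry-only image:
 *		call_rcu_tasks_trace()	step 1: __bpf_tramp_image_put_rcu_tasks
 *		-> call_rcu_tasks()	step 2: __bpf_tramp_image_put_rcu
 *		-> schedule_work()	__bpf_tramp_image_put_deferred frees it
 */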
/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect the trampoline itself
	 * rcu tasks to protect the trampoline asm not covered by percpu_ref
	 * (which are the few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait for the
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure the few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPT case a task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call the
	 * original function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of the trampoline
	 * asm and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}
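
/* Each trampoline update allocates a fresh image page via
 * bpf_tramp_image_alloc(). The page is registered with kallsyms and perf
 * as bpf_trampoline_<key>_<idx>, where idx is the trampoline's selector
 * (generation) counter. The previous image is released through
 * bpf_tramp_image_put() above and may outlive the update until all tasks
 * have left it.
 */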
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(1);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(1);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	bool ip_arg = false;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	if (ip_arg)
		flags |= BPF_TRAMP_F_IP_ARG;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}
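
/* Rough usage sketch of the API surface below. The actual attach/detach
 * path lives elsewhere in the BPF core; this sketch only strings together
 * functions defined in this file:
 *
 *	tr = bpf_trampoline_get(key, &tgt_info);
 *	if (!tr)
 *		return -ENOMEM;
 *	err = bpf_trampoline_link_prog(prog, tr);
 *	if (err)
 *		bpf_trampoline_put(tr);
 *
 * bpf_trampoline_unlink_prog() plus bpf_trampoline_put() undo the attach
 * when the corresponding link goes away.
 */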
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del_init(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del_init(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

#define NO_START_TIME 1
static u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;

	stats = this_cpu_ptr(prog->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->misses++;
	u64_stats_update_end(&stats->syncp);
}
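
/* Per-CPU recursion guard and optional runtime stats. Roughly, the
 * generated trampoline invokes each prog as (illustrative sketch; the
 * exact instruction sequence is emitted by the arch JIT):
 *
 *	start = __bpf_prog_enter(prog);		returns 0 to skip this run
 *	if (start)
 *		prog->bpf_func(ctx, insnsi);
 *	__bpf_prog_exit(prog, start);
 *
 * A run is skipped when prog->active is already non-zero on this CPU,
 * i.e. the prog recursed into itself; such runs are accounted via
 * inc_misses_counter(). With the bpf_stats_enabled_key static key on,
 * 'start' carries a sched_clock() timestamp that __bpf_prog_exit()
 * folds into prog->stats.
 */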
/* The logic is similar to BPF_PROG_RUN, but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		stats = this_cpu_ptr(prog->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);
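
/* For reference, a rough shape of the image the arch JIT emits for the
 * BPF_TRAMP_F_CALL_ORIG case (illustrative only; the real layout is
 * arch-specific and produced by arch_prepare_bpf_trampoline() above):
 *
 *	<prologue, save args>
 *	call __bpf_tramp_enter		pin im->pcref
 *	<run fentry and fmod_ret progs via the enter/exit helpers>
 *	call <original function>
 * im->ip_after_call:
 *	<run fexit progs>
 * im->ip_epilogue:
 *	call __bpf_tramp_exit		unpin im->pcref
 *	<restore regs and return>
 *
 * A fentry-only image skips the call to the original function and the
 * percpu_ref pinning entirely.
 */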