// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>
#include <linux/rcupdate_trace.h>
#include <linux/workqueue.h>
#include <linux/srcu.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time. Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Covers return_instance's uprobe lifetime. */
DEFINE_STATIC_SRCU(uretprobes_srcu);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct list_head	consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	union {
		struct rcu_head		rcu;
		struct work_struct	work;
	};
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;		/* "unsigned long" so bitops work */

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, thread contests for a slot. It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t	wq;		/* if all slots are busy */
	unsigned long		*bitmap;	/* 0 = free slot */

	struct page		*page;
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself. The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long		vaddr;		/* Page(s) of instruction slots */
};

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
}

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page we replace page by
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_folio));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at(mm, addr, pvmw.pte,
			   mk_pte(new_page, vma->vm_page_prot));

	folio_remove_rmap_pte(old_folio, old_page, vma);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in that case we have to return error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
"increment" : "decrement", uprobe->inode->i_ino, 422 (unsigned long long) uprobe->offset, 423 (unsigned long long) uprobe->ref_ctr_offset, mm); 424 } 425 426 static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm, 427 short d) 428 { 429 struct vm_area_struct *rc_vma; 430 unsigned long rc_vaddr; 431 int ret = 0; 432 433 rc_vma = find_ref_ctr_vma(uprobe, mm); 434 435 if (rc_vma) { 436 rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset); 437 ret = __update_ref_ctr(mm, rc_vaddr, d); 438 if (ret) 439 update_ref_ctr_warn(uprobe, mm, d); 440 441 if (d > 0) 442 return ret; 443 } 444 445 mutex_lock(&delayed_uprobe_lock); 446 if (d > 0) 447 ret = delayed_uprobe_add(uprobe, mm); 448 else 449 delayed_uprobe_remove(uprobe, mm); 450 mutex_unlock(&delayed_uprobe_lock); 451 452 return ret; 453 } 454 455 /* 456 * NOTE: 457 * Expect the breakpoint instruction to be the smallest size instruction for 458 * the architecture. If an arch has variable length instruction and the 459 * breakpoint instruction is not of the smallest length instruction 460 * supported by that architecture then we need to modify is_trap_at_addr and 461 * uprobe_write_opcode accordingly. This would never be a problem for archs 462 * that have fixed length instructions. 463 * 464 * uprobe_write_opcode - write the opcode at a given virtual address. 465 * @auprobe: arch specific probepoint information. 466 * @mm: the probed process address space. 467 * @vaddr: the virtual address to store the opcode. 468 * @opcode: opcode to be written at @vaddr. 469 * 470 * Called with mm->mmap_lock held for read or write. 471 * Return 0 (success) or a negative errno. 472 */ 473 int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, 474 unsigned long vaddr, uprobe_opcode_t opcode) 475 { 476 struct uprobe *uprobe; 477 struct page *old_page, *new_page; 478 struct vm_area_struct *vma; 479 int ret, is_register, ref_ctr_updated = 0; 480 bool orig_page_huge = false; 481 unsigned int gup_flags = FOLL_FORCE; 482 483 is_register = is_swbp_insn(&opcode); 484 uprobe = container_of(auprobe, struct uprobe, arch); 485 486 retry: 487 if (is_register) 488 gup_flags |= FOLL_SPLIT_PMD; 489 /* Read the page with vaddr into memory */ 490 old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma); 491 if (IS_ERR(old_page)) 492 return PTR_ERR(old_page); 493 494 ret = verify_opcode(old_page, vaddr, &opcode); 495 if (ret <= 0) 496 goto put_old; 497 498 if (WARN(!is_register && PageCompound(old_page), 499 "uprobe unregister should never work on compound page\n")) { 500 ret = -EINVAL; 501 goto put_old; 502 } 503 504 /* We are going to replace instruction, update ref_ctr. */ 505 if (!ref_ctr_updated && uprobe->ref_ctr_offset) { 506 ret = update_ref_ctr(uprobe, mm, is_register ? 
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert back reference counter if instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

/* uprobe should have guaranteed positive refcount */
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

/*
 * uprobe should have guaranteed lifetime, which can be either of:
 *   - caller already has refcount taken (and wants an extra one);
 *   - uprobe is RCU protected and won't be freed until after grace period;
 *   - we are holding uprobes_treelock (for read or write, doesn't matter).
 */
static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
{
	if (refcount_inc_not_zero(&uprobe->ref))
		return uprobe;
	return NULL;
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	kfree(uprobe);
}

static void uprobe_free_srcu(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace);
}

static void uprobe_free_deferred(struct work_struct *work)
{
	struct uprobe *uprobe = container_of(work, struct uprobe, work);

	write_lock(&uprobes_treelock);

	if (uprobe_is_active(uprobe)) {
		write_seqcount_begin(&uprobes_seqcount);
		rb_erase(&uprobe->rb_node, &uprobes_tree);
		write_seqcount_end(&uprobes_seqcount);
	}

	write_unlock(&uprobes_treelock);

	/*
	 * If application munmap(exec_vma) before uprobe_unregister()
	 * gets called, we don't get a chance to remove uprobe from
	 * delayed_uprobe_list from remove_breakpoint(). Do it here.
	 */
	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(uprobe, NULL);
	mutex_unlock(&delayed_uprobe_lock);

	/* start srcu -> rcu_tasks_trace -> kfree chain */
	call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu);
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (!refcount_dec_and_test(&uprobe->ref))
		return;

	INIT_WORK(&uprobe->work, uprobe_free_deferred);
	schedule_work(&uprobe->work);
}

/* Initialize hprobe as SRCU-protected "leased" uprobe */
static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx)
{
	WARN_ON(!uprobe);
	hprobe->state = HPROBE_LEASED;
	hprobe->uprobe = uprobe;
	hprobe->srcu_idx = srcu_idx;
}

/* Initialize hprobe as refcounted ("stable") uprobe (uprobe can be NULL). */
static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe)
{
	hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE;
	hprobe->uprobe = uprobe;
	hprobe->srcu_idx = -1;
}

/*
 * hprobe_consume() fetches hprobe's underlying uprobe and detects whether
 * uprobe is SRCU protected or is refcounted. hprobe_consume() can be
 * used only once for a given hprobe.
 *
 * Caller has to call hprobe_finalize() and pass previous hprobe_state, so
 * that hprobe_finalize() can perform SRCU unlock or put uprobe, whichever
 * is appropriate.
 */
static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate)
{
	*hstate = xchg(&hprobe->state, HPROBE_CONSUMED);
	switch (*hstate) {
	case HPROBE_LEASED:
	case HPROBE_STABLE:
		return hprobe->uprobe;
	case HPROBE_GONE:	/* uprobe is NULL, no SRCU */
	case HPROBE_CONSUMED:	/* uprobe was finalized already, do nothing */
		return NULL;
	default:
		WARN(1, "hprobe invalid state %d", *hstate);
		return NULL;
	}
}

/*
 * Reset hprobe state and, if hprobe was LEASED, release SRCU lock.
 * hprobe_finalize() can only be used from current context after
 * hprobe_consume() call (which determines uprobe and hstate value).
 */
static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate)
{
	switch (hstate) {
	case HPROBE_LEASED:
		__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
		break;
	case HPROBE_STABLE:
		put_uprobe(hprobe->uprobe);
		break;
	case HPROBE_GONE:
	case HPROBE_CONSUMED:
		break;
	default:
		WARN(1, "hprobe invalid state %d", hstate);
		break;
	}
}

/*
 * Attempt to switch (atomically) uprobe from being SRCU protected (LEASED)
 * to refcounted (STABLE) state. Competes with hprobe_consume(); only one of
 * them can win the race to perform SRCU unlocking. Whoever wins must perform
 * SRCU unlock.
 *
 * Returns underlying valid uprobe or NULL, if there was no underlying uprobe
 * to begin with or we failed to bump its refcount and it's going away.
 *
 * Returned non-NULL uprobe can be still safely used within an ongoing SRCU
 * locked region. If `get` is true, it's guaranteed that non-NULL uprobe has
 * an extra refcount for caller to assume and use. Otherwise, it's not
 * guaranteed that returned uprobe has a positive refcount, so caller has to
 * attempt try_get_uprobe(), if it needs to preserve uprobe beyond current
 * SRCU lock region. See dup_utask().
 */
static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
{
	enum hprobe_state hstate;

	/*
	 * return_instance's hprobe is protected by RCU.
	 * Underlying uprobe is itself protected from reuse by SRCU.
	 */
	lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu));

	hstate = READ_ONCE(hprobe->state);
	switch (hstate) {
	case HPROBE_STABLE:
		/* uprobe has positive refcount, bump refcount, if necessary */
		return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe;
	case HPROBE_GONE:
		/*
		 * SRCU was unlocked earlier and we didn't manage to take
		 * uprobe refcnt, so it's effectively NULL
		 */
		return NULL;
	case HPROBE_CONSUMED:
		/*
		 * uprobe was consumed, so it's effectively NULL as far as
		 * uretprobe processing logic is concerned
		 */
		return NULL;
	case HPROBE_LEASED: {
		struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe);
		/*
		 * Try to switch hprobe state, guarding against
		 * hprobe_consume() or another hprobe_expire() racing with us.
		 * Note, if we failed to get uprobe refcount, we use special
		 * HPROBE_GONE state to signal that hprobe->uprobe shouldn't
		 * be used as it will be freed after SRCU is unlocked.
		 */
		if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) {
			/* We won the race, we are the ones to unlock SRCU */
			__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
			return get ? get_uprobe(uprobe) : uprobe;
		}

		/*
		 * We lost the race, undo refcount bump (if it ever happened),
		 * unless caller would like an extra refcount anyways.
		 */
		if (uprobe && !get)
			put_uprobe(uprobe);
		/*
		 * Even if hprobe_consume() or another hprobe_expire() wins
		 * the state update race and unlocks SRCU from under us, we
		 * still have a guarantee that underlying uprobe won't be
		 * freed due to ongoing caller's SRCU lock region, so we can
		 * return it regardless. Also, if `get` was true, we also have
		 * an extra ref for the caller to own. This is used in dup_utask().
		 */
		return uprobe;
	}
	default:
		WARN(1, "unknown hprobe state %d", hstate);
		return NULL;
	}
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

/*
 * Assumes being inside RCU protected region.
 * No refcount is taken on returned uprobe.
 */
static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node;
	unsigned int seq;

	lockdep_assert(rcu_read_lock_trace_held());

	do {
		seq = read_seqcount_begin(&uprobes_seqcount);
		node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
		/*
		 * Lockless RB-tree lookups can result only in false negatives.
		 * If the element is found, it is correct and can be returned
		 * under RCU protection. If we find nothing, we need to
		 * validate that seqcount didn't change. If it did, we have to
		 * try again as we might have missed the element (false
		 * negative). If seqcount is unchanged, search truly failed.
		 */
		if (node)
			return __node_2_uprobe(node);
	} while (read_seqcount_retry(&uprobes_seqcount, seq));

	return NULL;
}

/*
 * Attempt to insert a new uprobe into uprobes_tree.
 *
 * If uprobe already exists (for given inode+offset), we just increment
 * refcount of previously existing uprobe.
 *
 * If not, a provided new instance of uprobe is inserted into the tree (with
 * assumed initial refcount == 1).
 *
 * In any case, we return a uprobe instance that ends up being in uprobes_tree.
 * Caller has to clean up new uprobe instance, if it ended up not being
 * inserted into the tree.
 *
 * We assume that uprobes_treelock is held for writing.
 */
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;
again:
	node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node) {
		struct uprobe *u = __node_2_uprobe(node);

		if (!try_get_uprobe(u)) {
			rb_erase(node, &uprobes_tree);
			RB_CLEAR_NODE(&u->rb_node);
			goto again;
		}

		return u;
	}

	return uprobe;
}

/*
 * Acquire uprobes_treelock and insert uprobe into uprobes_tree
 * (or reuse existing one, see __insert_uprobe() comments above).
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	write_lock(&uprobes_treelock);
	write_seqcount_begin(&uprobes_seqcount);
	u = __insert_uprobe(uprobe);
	write_seqcount_end(&uprobes_seqcount);
	write_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return ERR_PTR(-ENOMEM);

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	INIT_LIST_HEAD(&uprobe->consumers);
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);
	RB_CLEAR_NODE(&uprobe->rb_node);
	refcount_set(&uprobe->ref, 1);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe != uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	static atomic64_t id;

	down_write(&uprobe->consumer_rwsem);
	list_add_rcu(&uc->cons_node, &uprobe->consumers);
	uc->id = (__u64) atomic64_inc_return(&id);
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Should never be called with consumer that's not part of @uprobe->consumers.
 */
static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_del_rcu(&uc->cons_node);
	up_write(&uprobe->consumer_rwsem);
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, mm);
}

static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		ret = consumer_filter(uc, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;
		/*
		 * We take mmap_lock for writing to avoid the race with
		 * find_active_uprobe_rcu() which takes mmap_lock for reading.
		 * Thus this install_breakpoint() can not make
		 * is_trap_at_addr() true right after find_uprobe_rcu()
		 * returns NULL in find_active_uprobe_rcu().
		 */
		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

/**
 * uprobe_unregister_nosync - unregister an already registered probe.
 * @uprobe: uprobe to remove
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	down_write(&uprobe->register_rwsem);
	consumer_del(uprobe, uc);
	err = register_for_each_vma(uprobe, NULL);
	up_write(&uprobe->register_rwsem);

	/* TODO: can't unregister? schedule a worker thread. */
	if (unlikely(err)) {
		uprobe_warn(current, "unregister, leaking uprobe");
		return;
	}

	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);

void uprobe_unregister_sync(void)
{
	/*
	 * Now that handler_chain() and handle_uretprobe_chain() iterate over
	 * uprobe->consumers list under RCU protection without holding
	 * uprobe->register_rwsem, we need to wait for RCU grace period to
	 * make sure that we can't call into just unregistered
	 * uprobe_consumer's callbacks anymore. If we don't do that, fast and
	 * unlucky enough caller can free consumer's memory and cause
	 * handler_chain() or handle_uretprobe_chain() to do a use-after-free.
	 */
	synchronize_rcu_tasks_trace();
	synchronize_srcu(&uretprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);

/**
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @ref_ctr_offset: offset of SDT marker / reference counter
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (thro alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return: pointer to the new uprobe on success or an ERR_PTR on failure.
 */
struct uprobe *uprobe_register(struct inode *inode,
				loff_t offset, loff_t ref_ctr_offset,
				struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return ERR_PTR(-EINVAL);

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->read_folio &&
	    !shmem_mapping(inode->i_mapping))
		return ERR_PTR(-EIO);
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return ERR_PTR(-EINVAL);

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return ERR_PTR(-EINVAL);
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return ERR_PTR(-EINVAL);

	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (IS_ERR(uprobe))
		return uprobe;

	down_write(&uprobe->register_rwsem);
	consumer_add(uprobe, uc);
	ret = register_for_each_vma(uprobe, uc);
	up_write(&uprobe->register_rwsem);

	if (ret) {
		uprobe_unregister_nosync(uprobe, uc);
		/*
		 * Registration might have partially succeeded, so we can have
		 * this consumer being called right at this time. We need to
		 * sync here. It's ok, it's unlikely slow path.
		 */
		uprobe_unregister_sync();
		return ERR_PTR(ret);
	}

	return uprobe;
}
EXPORT_SYMBOL_GPL(uprobe_register);

/**
 * uprobe_apply - add or remove the breakpoints according to @uc->filter
 * @uprobe: uprobe which "owns" the breakpoint
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 * Return: 0 on success or negative error code.
 */
int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	down_write(&uprobe->register_rwsem);

	rcu_read_lock_trace();
	list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		if (con == uc) {
			ret = register_for_each_vma(uprobe, add ? uc : NULL);
			break;
		}
	}
	rcu_read_unlock_trace();

	up_write(&uprobe->register_rwsem);

	return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
	}
	read_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;
	unsigned long vaddr;
	int ret = 0, err = 0;

	mutex_lock(&delayed_uprobe_lock);
	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (du->mm != vma->vm_mm ||
		    !valid_ref_ctr_vma(du->uprobe, vma))
			continue;

		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
		if (ret) {
			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
			if (!err)
				err = ret;
		}
		delayed_uprobe_delete(du);
	}
	mutex_unlock(&delayed_uprobe_lock);
	return err;
}

/*
 * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events())
		return 0;

	if (vma->vm_file &&
	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
		delayed_ref_ctr_inc(vma);

	if (!valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	read_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
			    struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;

	vmf->page = area->page;
	get_page(vmf->page);
	return 0;
}

static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return -EPERM;
}

static const struct vm_special_mapping xol_mapping = {
	.name = "[uprobes]",
	.fault = xol_fault,
	.mremap = xol_mremap,
};

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(area->vaddr)) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
 fail:
	mmap_write_unlock(mm);

	return ret;
}

void * __weak arch_uprobe_trampoline(unsigned long *psize)
{
	static uprobe_opcode_t insn = UPROBE_SWBP_INSN;

	*psize = UPROBE_SWBP_INSN_SIZE;
	return &insn;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long insns_size;
	struct xol_area *area;
	void *insns;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
	if (!area->page)
		goto free_bitmap;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	insns = arch_uprobe_trampoline(&insns_size);
	arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->page);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(NULL, mm);
	mutex_unlock(&delayed_uprobe_lock);

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

static unsigned long xol_get_slot_nr(struct xol_area *area)
{
	unsigned long slot_nr;

	slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
	if (slot_nr < UINSNS_PER_PAGE) {
		if (!test_and_set_bit(slot_nr, area->bitmap))
			return slot_nr;
	}

	return UINSNS_PER_PAGE;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 */
static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask)
{
	struct xol_area *area = get_xol_area();
	unsigned long slot_nr;

	if (!area)
		return false;

	wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE);

	utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
	arch_uprobe_copy_ixol(area->page, utask->xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
	return true;
}

/*
 * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot()
 */
static void xol_free_insn_slot(struct uprobe_task *utask)
{
	struct xol_area *area = current->mm->uprobes_state.xol_area;
	unsigned long offset = utask->xol_vaddr - area->vaddr;
	unsigned int slot_nr;

	utask->xol_vaddr = 0;
	/* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */
	if (WARN_ON_ONCE(offset >= PAGE_SIZE))
		return;

	slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
	clear_bit(slot_nr, area->bitmap);
	smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
	if (waitqueue_active(&area->wq))
		wake_up(&area->wq);
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri, bool cleanup_hprobe)
{
	struct return_instance *next = ri->next;

	if (cleanup_hprobe) {
		enum hprobe_state hstate;

		(void)hprobe_consume(&ri->hprobe, &hstate);
		hprobe_finalize(&ri->hprobe, hstate);
	}

	kfree_rcu(ri, rcu);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr);

	timer_delete_sync(&utask->ri_timer);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri, true /* cleanup_hprobe */);

	kfree(utask);
	t->utask = NULL;
}

#define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */

#define for_each_ret_instance_rcu(pos, head) \
	for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next))

static void ri_timer(struct timer_list *timer)
{
	struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer);
	struct return_instance *ri;

	/* SRCU protects uprobe from reuse for the cmpxchg() inside hprobe_expire(). */
	guard(srcu)(&uretprobes_srcu);
	/* RCU protects return_instance from freeing. */
	guard(rcu)();

	for_each_ret_instance_rcu(ri, utask->return_instances)
		hprobe_expire(&ri->hprobe, false);
}

static struct uprobe_task *alloc_utask(void)
{
	struct uprobe_task *utask;

	utask = kzalloc(sizeof(*utask), GFP_KERNEL);
	if (!utask)
		return NULL;

	timer_setup(&utask->ri_timer, ri_timer, 0);

	return utask;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
1966 * 1967 * Returns: 1968 * - pointer to new uprobe_task on success 1969 * - NULL otherwise 1970 */ 1971 static struct uprobe_task *get_utask(void) 1972 { 1973 if (!current->utask) 1974 current->utask = alloc_utask(); 1975 return current->utask; 1976 } 1977 1978 static size_t ri_size(int consumers_cnt) 1979 { 1980 struct return_instance *ri; 1981 1982 return sizeof(*ri) + sizeof(ri->consumers[0]) * consumers_cnt; 1983 } 1984 1985 #define DEF_CNT 4 1986 1987 static struct return_instance *alloc_return_instance(void) 1988 { 1989 struct return_instance *ri; 1990 1991 ri = kzalloc(ri_size(DEF_CNT), GFP_KERNEL); 1992 if (!ri) 1993 return ZERO_SIZE_PTR; 1994 1995 ri->consumers_cnt = DEF_CNT; 1996 return ri; 1997 } 1998 1999 static struct return_instance *dup_return_instance(struct return_instance *old) 2000 { 2001 size_t size = ri_size(old->consumers_cnt); 2002 2003 return kmemdup(old, size, GFP_KERNEL); 2004 } 2005 2006 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) 2007 { 2008 struct uprobe_task *n_utask; 2009 struct return_instance **p, *o, *n; 2010 struct uprobe *uprobe; 2011 2012 n_utask = alloc_utask(); 2013 if (!n_utask) 2014 return -ENOMEM; 2015 t->utask = n_utask; 2016 2017 /* protect uprobes from freeing, we'll need try_get_uprobe() them */ 2018 guard(srcu)(&uretprobes_srcu); 2019 2020 p = &n_utask->return_instances; 2021 for (o = o_utask->return_instances; o; o = o->next) { 2022 n = dup_return_instance(o); 2023 if (!n) 2024 return -ENOMEM; 2025 2026 /* if uprobe is non-NULL, we'll have an extra refcount for uprobe */ 2027 uprobe = hprobe_expire(&o->hprobe, true); 2028 2029 /* 2030 * New utask will have stable properly refcounted uprobe or 2031 * NULL. Even if we failed to get refcounted uprobe, we still 2032 * need to preserve full set of return_instances for proper 2033 * uretprobe handling and nesting in forked task. 2034 */ 2035 hprobe_init_stable(&n->hprobe, uprobe); 2036 2037 n->next = NULL; 2038 rcu_assign_pointer(*p, n); 2039 p = &n->next; 2040 2041 n_utask->depth++; 2042 } 2043 2044 return 0; 2045 } 2046 2047 static void dup_xol_work(struct callback_head *work) 2048 { 2049 if (current->flags & PF_EXITING) 2050 return; 2051 2052 if (!__create_xol_area(current->utask->dup_xol_addr) && 2053 !fatal_signal_pending(current)) 2054 uprobe_warn(current, "dup xol area"); 2055 } 2056 2057 /* 2058 * Called in context of a new clone/fork from copy_process. 2059 */ 2060 void uprobe_copy_process(struct task_struct *t, unsigned long flags) 2061 { 2062 struct uprobe_task *utask = current->utask; 2063 struct mm_struct *mm = current->mm; 2064 struct xol_area *area; 2065 2066 t->utask = NULL; 2067 2068 if (!utask || !utask->return_instances) 2069 return; 2070 2071 if (mm == t->mm && !(flags & CLONE_VFORK)) 2072 return; 2073 2074 if (dup_utask(t, utask)) 2075 return uprobe_warn(t, "dup ret instances"); 2076 2077 /* The task can fork() after dup_xol_work() fails */ 2078 area = mm->uprobes_state.xol_area; 2079 if (!area) 2080 return uprobe_warn(t, "dup xol area"); 2081 2082 if (mm == t->mm) 2083 return; 2084 2085 t->utask->dup_xol_addr = area->vaddr; 2086 init_task_work(&t->utask->dup_xol_work, dup_xol_work); 2087 task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME); 2088 } 2089 2090 /* 2091 * Current area->vaddr notion assume the trampoline address is always 2092 * equal area->vaddr. 2093 * 2094 * Returns -1 in case the xol_area is not allocated. 
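 *
 * Editor's illustrative aside on dup_utask() above: it copies the singly
 * linked return_instances list in order by remembering the address of the
 * last node's ->next field rather than re-walking to the tail for every
 * element. Stripped of the uprobe/hprobe details, the idiom is (names
 * invented for the example):
 *
 *	struct node { struct node *next; long payload; };
 *	struct node *head = NULL, **p = &head, *n, *o;
 *
 *	for (o = old_list; o; o = o->next) {
 *		n = dup_node(o);		// kmemdup() in dup_return_instance()
 *		if (!n)
 *			return -ENOMEM;		// partial copy is left for the caller
 *		n->next = NULL;
 *		*p = n;				// append at the remembered tail
 *		p = &n->next;
 *	}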
2095 */ 2096 unsigned long uprobe_get_trampoline_vaddr(void) 2097 { 2098 struct xol_area *area; 2099 unsigned long trampoline_vaddr = -1; 2100 2101 /* Pairs with xol_add_vma() smp_store_release() */ 2102 area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ 2103 if (area) 2104 trampoline_vaddr = area->vaddr; 2105 2106 return trampoline_vaddr; 2107 } 2108 2109 static void cleanup_return_instances(struct uprobe_task *utask, bool chained, 2110 struct pt_regs *regs) 2111 { 2112 struct return_instance *ri = utask->return_instances; 2113 enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; 2114 2115 while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { 2116 ri = free_ret_instance(ri, true /* cleanup_hprobe */); 2117 utask->depth--; 2118 } 2119 rcu_assign_pointer(utask->return_instances, ri); 2120 } 2121 2122 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, 2123 struct return_instance *ri) 2124 { 2125 struct uprobe_task *utask = current->utask; 2126 unsigned long orig_ret_vaddr, trampoline_vaddr; 2127 bool chained; 2128 int srcu_idx; 2129 2130 if (!get_xol_area()) 2131 goto free; 2132 2133 if (utask->depth >= MAX_URETPROBE_DEPTH) { 2134 printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" 2135 " nestedness limit pid/tgid=%d/%d\n", 2136 current->pid, current->tgid); 2137 goto free; 2138 } 2139 2140 trampoline_vaddr = uprobe_get_trampoline_vaddr(); 2141 orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); 2142 if (orig_ret_vaddr == -1) 2143 goto free; 2144 2145 /* drop the entries invalidated by longjmp() */ 2146 chained = (orig_ret_vaddr == trampoline_vaddr); 2147 cleanup_return_instances(utask, chained, regs); 2148 2149 /* 2150 * We don't want to keep trampoline address in stack, rather keep the 2151 * original return address of first caller thru all the consequent 2152 * instances. This also makes breakpoint unwrapping easier. 2153 */ 2154 if (chained) { 2155 if (!utask->return_instances) { 2156 /* 2157 * This situation is not possible. Likely we have an 2158 * attack from user-space. 2159 */ 2160 uprobe_warn(current, "handle tail call"); 2161 goto free; 2162 } 2163 orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; 2164 } 2165 2166 /* __srcu_read_lock() because SRCU lock survives switch to user space */ 2167 srcu_idx = __srcu_read_lock(&uretprobes_srcu); 2168 2169 ri->func = instruction_pointer(regs); 2170 ri->stack = user_stack_pointer(regs); 2171 ri->orig_ret_vaddr = orig_ret_vaddr; 2172 ri->chained = chained; 2173 2174 utask->depth++; 2175 2176 hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx); 2177 ri->next = utask->return_instances; 2178 rcu_assign_pointer(utask->return_instances, ri); 2179 2180 mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD); 2181 2182 return; 2183 free: 2184 kfree(ri); 2185 } 2186 2187 /* Prepare to single-step probed instruction out of line. 
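 *
 * Editor's illustrative aside on prepare_uretprobe() above: "chained" means
 * the return address we are about to hijack is already the trampoline, i.e.
 * the caller itself is being uretprobed. Slightly simplified, the choice of
 * orig_ret_vaddr boils down to
 *
 *	if (orig_ret_vaddr != trampoline_vaddr)
 *		ri->orig_ret_vaddr = orig_ret_vaddr;	// first hijack in this chain
 *	else if (utask->return_instances)
 *		ri->orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
 *	else
 *		goto free;	// impossible unless the user stack is corrupted
 *
 * so the outermost caller's real return address is propagated through all
 * chained instances and the trampoline address never has to be recovered
 * from the user stack.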
 */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask = current->utask;
	int err;

	if (!try_get_uprobe(uprobe))
		return -EINVAL;

	if (!xol_get_insn_slot(uprobe, utask)) {
		err = -ENOMEM;
		goto err_out;
	}

	utask->vaddr = bp_vaddr;
	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(utask);
		goto err_out;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
err_out:
	put_uprobe(uprobe);
	return err;
}

/*
 * If we are singlestepping, then ensure that this thread does not receive
 * non-fatal signals until the singlestep completes. When the xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip). This
 * is even more important if the task has a handler for SIGSEGV/etc. The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (task_sigpending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate: we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
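		 * Losing that race is harmless: MMF_HAS_UPROBES merely stays
		 * set for this mm and the next breakpoint hit takes the slow
		 * path again (editor's note).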
2263 */ 2264 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) 2265 return; 2266 } 2267 2268 clear_bit(MMF_HAS_UPROBES, &mm->flags); 2269 } 2270 2271 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) 2272 { 2273 struct page *page; 2274 uprobe_opcode_t opcode; 2275 int result; 2276 2277 if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) 2278 return -EINVAL; 2279 2280 pagefault_disable(); 2281 result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); 2282 pagefault_enable(); 2283 2284 if (likely(result == 0)) 2285 goto out; 2286 2287 result = get_user_pages(vaddr, 1, FOLL_FORCE, &page); 2288 if (result < 0) 2289 return result; 2290 2291 copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); 2292 put_page(page); 2293 out: 2294 /* This needs to return true for any variant of the trap insn */ 2295 return is_trap_insn(&opcode); 2296 } 2297 2298 /* assumes being inside RCU protected region */ 2299 static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp) 2300 { 2301 struct mm_struct *mm = current->mm; 2302 struct uprobe *uprobe = NULL; 2303 struct vm_area_struct *vma; 2304 2305 mmap_read_lock(mm); 2306 vma = vma_lookup(mm, bp_vaddr); 2307 if (vma) { 2308 if (valid_vma(vma, false)) { 2309 struct inode *inode = file_inode(vma->vm_file); 2310 loff_t offset = vaddr_to_offset(vma, bp_vaddr); 2311 2312 uprobe = find_uprobe_rcu(inode, offset); 2313 } 2314 2315 if (!uprobe) 2316 *is_swbp = is_trap_at_addr(mm, bp_vaddr); 2317 } else { 2318 *is_swbp = -EFAULT; 2319 } 2320 2321 if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) 2322 mmf_recalc_uprobes(mm); 2323 mmap_read_unlock(mm); 2324 2325 return uprobe; 2326 } 2327 2328 static struct return_instance* 2329 push_consumer(struct return_instance *ri, int idx, __u64 id, __u64 cookie) 2330 { 2331 if (unlikely(ri == ZERO_SIZE_PTR)) 2332 return ri; 2333 2334 if (unlikely(idx >= ri->consumers_cnt)) { 2335 struct return_instance *old_ri = ri; 2336 2337 ri->consumers_cnt += DEF_CNT; 2338 ri = krealloc(old_ri, ri_size(old_ri->consumers_cnt), GFP_KERNEL); 2339 if (!ri) { 2340 kfree(old_ri); 2341 return ZERO_SIZE_PTR; 2342 } 2343 } 2344 2345 ri->consumers[idx].id = id; 2346 ri->consumers[idx].cookie = cookie; 2347 return ri; 2348 } 2349 2350 static struct return_consumer * 2351 return_consumer_find(struct return_instance *ri, int *iter, int id) 2352 { 2353 struct return_consumer *ric; 2354 int idx = *iter; 2355 2356 for (ric = &ri->consumers[idx]; idx < ri->consumers_cnt; idx++, ric++) { 2357 if (ric->id == id) { 2358 *iter = idx + 1; 2359 return ric; 2360 } 2361 } 2362 return NULL; 2363 } 2364 2365 static bool ignore_ret_handler(int rc) 2366 { 2367 return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE; 2368 } 2369 2370 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) 2371 { 2372 struct uprobe_consumer *uc; 2373 bool has_consumers = false, remove = true; 2374 struct return_instance *ri = NULL; 2375 int push_idx = 0; 2376 2377 current->utask->auprobe = &uprobe->arch; 2378 2379 list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { 2380 bool session = uc->handler && uc->ret_handler; 2381 __u64 cookie = 0; 2382 int rc = 0; 2383 2384 if (uc->handler) { 2385 rc = uc->handler(uc, regs, &cookie); 2386 WARN(rc < 0 || rc > 2, 2387 "bad rc=0x%x from %ps()\n", rc, uc->handler); 2388 } 2389 2390 remove &= rc == UPROBE_HANDLER_REMOVE; 2391 has_consumers = true; 2392 2393 if (!uc->ret_handler || ignore_ret_handler(rc)) 2394 
continue; 2395 2396 if (!ri) 2397 ri = alloc_return_instance(); 2398 2399 if (session) 2400 ri = push_consumer(ri, push_idx++, uc->id, cookie); 2401 } 2402 current->utask->auprobe = NULL; 2403 2404 if (!ZERO_OR_NULL_PTR(ri)) { 2405 /* 2406 * The push_idx value has the final number of return consumers, 2407 * and ri->consumers_cnt has number of allocated consumers. 2408 */ 2409 ri->consumers_cnt = push_idx; 2410 prepare_uretprobe(uprobe, regs, ri); 2411 } 2412 2413 if (remove && has_consumers) { 2414 down_read(&uprobe->register_rwsem); 2415 2416 /* re-check that removal is still required, this time under lock */ 2417 if (!filter_chain(uprobe, current->mm)) { 2418 WARN_ON(!uprobe_is_active(uprobe)); 2419 unapply_uprobe(uprobe, current->mm); 2420 } 2421 2422 up_read(&uprobe->register_rwsem); 2423 } 2424 } 2425 2426 static void 2427 handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs) 2428 { 2429 struct return_consumer *ric; 2430 struct uprobe_consumer *uc; 2431 int ric_idx = 0; 2432 2433 /* all consumers unsubscribed meanwhile */ 2434 if (unlikely(!uprobe)) 2435 return; 2436 2437 rcu_read_lock_trace(); 2438 list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { 2439 bool session = uc->handler && uc->ret_handler; 2440 2441 if (uc->ret_handler) { 2442 ric = return_consumer_find(ri, &ric_idx, uc->id); 2443 if (!session || ric) 2444 uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL); 2445 } 2446 } 2447 rcu_read_unlock_trace(); 2448 } 2449 2450 static struct return_instance *find_next_ret_chain(struct return_instance *ri) 2451 { 2452 bool chained; 2453 2454 do { 2455 chained = ri->chained; 2456 ri = ri->next; /* can't be NULL if chained */ 2457 } while (chained); 2458 2459 return ri; 2460 } 2461 2462 void uprobe_handle_trampoline(struct pt_regs *regs) 2463 { 2464 struct uprobe_task *utask; 2465 struct return_instance *ri, *next; 2466 struct uprobe *uprobe; 2467 enum hprobe_state hstate; 2468 bool valid; 2469 2470 utask = current->utask; 2471 if (!utask) 2472 goto sigill; 2473 2474 ri = utask->return_instances; 2475 if (!ri) 2476 goto sigill; 2477 2478 do { 2479 /* 2480 * We should throw out the frames invalidated by longjmp(). 2481 * If this chain is valid, then the next one should be alive 2482 * or NULL; the latter case means that nobody but ri->func 2483 * could hit this trampoline on return. TODO: sigaltstack(). 2484 */ 2485 next = find_next_ret_chain(ri); 2486 valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs); 2487 2488 instruction_pointer_set(regs, ri->orig_ret_vaddr); 2489 do { 2490 /* pop current instance from the stack of pending return instances, 2491 * as it's not pending anymore: we just fixed up original 2492 * instruction pointer in regs and are about to call handlers; 2493 * this allows fixup_uretprobe_trampoline_entries() to properly fix up 2494 * captured stack traces from uretprobe handlers, in which pending 2495 * trampoline addresses on the stack are replaced with correct 2496 * original return addresses 2497 */ 2498 rcu_assign_pointer(utask->return_instances, ri->next); 2499 2500 uprobe = hprobe_consume(&ri->hprobe, &hstate); 2501 if (valid) 2502 handle_uretprobe_chain(ri, uprobe, regs); 2503 hprobe_finalize(&ri->hprobe, hstate); 2504 2505 /* We already took care of hprobe, no need to waste more time on that. 
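		 * (Editor's note: this is why the call below passes "false";
		 * the hprobe was consumed and finalized just above, so
		 * free_ret_instance() must not release it a second time.)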
*/ 2506 ri = free_ret_instance(ri, false /* !cleanup_hprobe */); 2507 utask->depth--; 2508 } while (ri != next); 2509 } while (!valid); 2510 2511 return; 2512 2513 sigill: 2514 uprobe_warn(current, "handle uretprobe, sending SIGILL."); 2515 force_sig(SIGILL); 2516 } 2517 2518 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) 2519 { 2520 return false; 2521 } 2522 2523 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, 2524 struct pt_regs *regs) 2525 { 2526 return true; 2527 } 2528 2529 /* 2530 * Run handler and ask thread to singlestep. 2531 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. 2532 */ 2533 static void handle_swbp(struct pt_regs *regs) 2534 { 2535 struct uprobe *uprobe; 2536 unsigned long bp_vaddr; 2537 int is_swbp; 2538 2539 bp_vaddr = uprobe_get_swbp_addr(regs); 2540 if (bp_vaddr == uprobe_get_trampoline_vaddr()) 2541 return uprobe_handle_trampoline(regs); 2542 2543 rcu_read_lock_trace(); 2544 2545 uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp); 2546 if (!uprobe) { 2547 if (is_swbp > 0) { 2548 /* No matching uprobe; signal SIGTRAP. */ 2549 force_sig(SIGTRAP); 2550 } else { 2551 /* 2552 * Either we raced with uprobe_unregister() or we can't 2553 * access this memory. The latter is only possible if 2554 * another thread plays with our ->mm. In both cases 2555 * we can simply restart. If this vma was unmapped we 2556 * can pretend this insn was not executed yet and get 2557 * the (correct) SIGSEGV after restart. 2558 */ 2559 instruction_pointer_set(regs, bp_vaddr); 2560 } 2561 goto out; 2562 } 2563 2564 /* change it in advance for ->handler() and restart */ 2565 instruction_pointer_set(regs, bp_vaddr); 2566 2567 /* 2568 * TODO: move copy_insn/etc into _register and remove this hack. 2569 * After we hit the bp, _unregister + _register can install the 2570 * new and not-yet-analyzed uprobe at the same address, restart. 2571 */ 2572 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) 2573 goto out; 2574 2575 /* 2576 * Pairs with the smp_wmb() in prepare_uprobe(). 2577 * 2578 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then 2579 * we must also see the stores to &uprobe->arch performed by the 2580 * prepare_uprobe() call. 2581 */ 2582 smp_rmb(); 2583 2584 /* Tracing handlers use ->utask to communicate with fetch methods */ 2585 if (!get_utask()) 2586 goto out; 2587 2588 if (arch_uprobe_ignore(&uprobe->arch, regs)) 2589 goto out; 2590 2591 handler_chain(uprobe, regs); 2592 2593 if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) 2594 goto out; 2595 2596 if (pre_ssout(uprobe, regs, bp_vaddr)) 2597 goto out; 2598 2599 out: 2600 /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */ 2601 rcu_read_unlock_trace(); 2602 } 2603 2604 /* 2605 * Perform required fix-ups and disable singlestep. 2606 * Allow pending signals to take effect. 
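 *
 * Editor's note on the state machine: handle_singlestep() below is reached
 * with utask->state either UTASK_SSTEP_ACK (the out-of-line copy executed,
 * arch_uprobe_post_xol() applies the fix-ups) or UTASK_SSTEP_TRAPPED (the
 * copy trapped, arch_uprobe_abort_xol() restarts the original instruction).
 * In both cases the task goes back to UTASK_RUNNING, the xol slot is freed
 * and the signal state deferred by uprobe_deny_signal() is recalculated.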
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(utask);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig(SIGILL);
	}
}

/*
 * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On a singlestep exception, the singlestep notifier sets the TIF_UPROBE
 * flag and allows the thread to return from interrupt.
 *
 * While returning to userspace, the thread notices the TIF_UPROBE flag and
 * calls uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of the
 * notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of the
 * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of the
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call	= arch_uprobe_exception_notify,
	.priority	= INT_MAX - 1,	/* notified after kprobes, kgdb */
};

void __init uprobes_init(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	BUG_ON(register_die_notifier(&uprobe_exception_nb));
}
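/*
 * Editor's illustrative appendix (not part of uprobes.c): a minimal sketch of
 * a uprobe consumer driving the machinery above. The callback signatures are
 * inferred from the calls in handler_chain() and handle_uretprobe_chain();
 * the exact layout of struct uprobe_consumer and the registration interface
 * live in <linux/uprobes.h> and may differ between kernel versions, so treat
 * this purely as a sketch.
 */
#if 0	/* example only, never compiled */
static int example_entry(struct uprobe_consumer *uc, struct pt_regs *regs,
			 __u64 *cookie)
{
	if (cookie)
		*cookie = instruction_pointer(regs);	/* handed back at return time */
	/* 0 keeps the probe; UPROBE_HANDLER_REMOVE asks for its removal */
	return 0;
}

static int example_ret(struct uprobe_consumer *uc, unsigned long func,
		       struct pt_regs *regs, __u64 *cookie)
{
	pr_debug("returned from %pS, cookie=0x%llx\n",
		 (void *)func, cookie ? *cookie : 0ull);
	return 0;	/* return type assumed; the value is not used above */
}

static struct uprobe_consumer example_consumer = {
	.handler	= example_entry,
	.ret_handler	= example_ret,
	/* registered against an (inode, offset) pair, e.g. via uprobe_register() */
};
#endif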