// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time. Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

DEFINE_STATIC_SRCU(uprobes_srcu);

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct list_head	consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	struct rcu_head		rcu;
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, thread contests for a slot. It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself. The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
}

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page we replace page by
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_folio));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at(mm, addr, pvmw.pte,
			   mk_pte(new_page, vma->vm_page_prot));

	folio_remove_rmap_pte(old_folio, old_page, vma);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in that case we have to return error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not the smallest instruction supported by that
 * architecture, then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for read or write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
	if (IS_ERR(old_page))
		return PTR_ERR(old_page);

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert back reference counter if instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

/* uprobe should have guaranteed positive refcount */
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

/*
 * uprobe should have guaranteed lifetime, which can be either of:
 *   - caller already has refcount taken (and wants an extra one);
 *   - uprobe is RCU protected and won't be freed until after grace period;
 *   - we are holding uprobes_treelock (for read or write, doesn't matter).
 */
static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
{
	if (refcount_inc_not_zero(&uprobe->ref))
		return uprobe;
	return NULL;
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

static void uprobe_free_rcu(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	kfree(uprobe);
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (!refcount_dec_and_test(&uprobe->ref))
		return;

	write_lock(&uprobes_treelock);

	if (uprobe_is_active(uprobe)) {
		write_seqcount_begin(&uprobes_seqcount);
		rb_erase(&uprobe->rb_node, &uprobes_tree);
		write_seqcount_end(&uprobes_seqcount);
	}

	write_unlock(&uprobes_treelock);

	/*
	 * If application munmap(exec_vma) before uprobe_unregister()
	 * gets called, we don't get a chance to remove uprobe from
	 * delayed_uprobe_list from remove_breakpoint(). Do it here.
	 */
	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(uprobe, NULL);
	mutex_unlock(&delayed_uprobe_lock);

	call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu);
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

/*
 * Assumes being inside RCU protected region.
 * No refcount is taken on returned uprobe.
 */
static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node;
	unsigned int seq;

	lockdep_assert(srcu_read_lock_held(&uprobes_srcu));

	do {
		seq = read_seqcount_begin(&uprobes_seqcount);
		node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
		/*
		 * Lockless RB-tree lookups can result only in false negatives.
		 * If the element is found, it is correct and can be returned
		 * under RCU protection. If we find nothing, we need to
		 * validate that seqcount didn't change. If it did, we have to
		 * try again as we might have missed the element (false
		 * negative). If seqcount is unchanged, search truly failed.
		 */
		if (node)
			return __node_2_uprobe(node);
	} while (read_seqcount_retry(&uprobes_seqcount, seq));

	return NULL;
}

/*
 * Attempt to insert a new uprobe into uprobes_tree.
 *
 * If uprobe already exists (for given inode+offset), we just increment
 * refcount of previously existing uprobe.
 *
 * If not, a provided new instance of uprobe is inserted into the tree (with
 * assumed initial refcount == 1).
 *
 * In any case, we return a uprobe instance that ends up being in uprobes_tree.
 * Caller has to clean up new uprobe instance, if it ended up not being
 * inserted into the tree.
 *
 * We assume that uprobes_treelock is held for writing.
 */
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;
again:
	node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node) {
		struct uprobe *u = __node_2_uprobe(node);

		if (!try_get_uprobe(u)) {
			rb_erase(node, &uprobes_tree);
			RB_CLEAR_NODE(&u->rb_node);
			goto again;
		}

		return u;
	}

	return uprobe;
}

/*
 * Acquire uprobes_treelock and insert uprobe into uprobes_tree
 * (or reuse existing one, see __insert_uprobe() comments above).
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	write_lock(&uprobes_treelock);
	write_seqcount_begin(&uprobes_seqcount);
	u = __insert_uprobe(uprobe);
	write_seqcount_end(&uprobes_seqcount);
	write_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return ERR_PTR(-ENOMEM);

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	INIT_LIST_HEAD(&uprobe->consumers);
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);
	RB_CLEAR_NODE(&uprobe->rb_node);
	refcount_set(&uprobe->ref, 1);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe != uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_add_rcu(&uc->cons_node, &uprobe->consumers);
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Should never be called with consumer that's not part of @uprobe->consumers.
 */
static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_del_rcu(&uc->cons_node);
	up_write(&uprobe->consumer_rwsem);
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, mm);
}

static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
				 srcu_read_lock_held(&uprobes_srcu)) {
		ret = consumer_filter(uc, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;
		/*
		 * We take mmap_lock for writing to avoid the race with
		 * find_active_uprobe_rcu() which takes mmap_lock for reading.
		 * Thus this install_breakpoint() can not make
		 * is_trap_at_addr() true right after find_uprobe_rcu()
		 * returns NULL in find_active_uprobe_rcu().
		 */
		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

/**
 * uprobe_unregister_nosync - unregister an already registered probe.
 * @uprobe: uprobe to remove
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	down_write(&uprobe->register_rwsem);
	consumer_del(uprobe, uc);
	err = register_for_each_vma(uprobe, NULL);
	up_write(&uprobe->register_rwsem);

	/* TODO: can't unregister? schedule a worker thread */
	if (unlikely(err)) {
		uprobe_warn(current, "unregister, leaking uprobe");
		return;
	}

	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);

void uprobe_unregister_sync(void)
{
	/*
	 * Now that handler_chain() and handle_uretprobe_chain() iterate over
	 * uprobe->consumers list under RCU protection without holding
	 * uprobe->register_rwsem, we need to wait for RCU grace period to
	 * make sure that we can't call into just unregistered
	 * uprobe_consumer's callbacks anymore. If we don't do that, fast and
	 * unlucky enough caller can free consumer's memory and cause
	 * handler_chain() or handle_uretprobe_chain() to do a use-after-free.
	 */
	synchronize_srcu(&uprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);

/**
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @ref_ctr_offset: offset of SDT marker / reference counter
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return: pointer to the new uprobe on success or an ERR_PTR on failure.
 */
struct uprobe *uprobe_register(struct inode *inode,
				loff_t offset, loff_t ref_ctr_offset,
				struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return ERR_PTR(-EINVAL);

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->read_folio &&
	    !shmem_mapping(inode->i_mapping))
		return ERR_PTR(-EIO);
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return ERR_PTR(-EINVAL);

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return ERR_PTR(-EINVAL);
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return ERR_PTR(-EINVAL);

	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (IS_ERR(uprobe))
		return uprobe;

	down_write(&uprobe->register_rwsem);
	consumer_add(uprobe, uc);
	ret = register_for_each_vma(uprobe, uc);
	up_write(&uprobe->register_rwsem);

	if (ret) {
		uprobe_unregister_nosync(uprobe, uc);
		/*
		 * Registration might have partially succeeded, so we can have
		 * this consumer being called right at this time. We need to
		 * sync here. It's OK, it's an unlikely slow path.
		 */
		uprobe_unregister_sync();
		return ERR_PTR(ret);
	}

	return uprobe;
}
EXPORT_SYMBOL_GPL(uprobe_register);

/**
 * uprobe_apply - add or remove the breakpoints according to @uc->filter
 * @uprobe: uprobe which "owns" the breakpoint
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 * Return: 0 on success or negative error code.
 */
int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
	struct uprobe_consumer *con;
	int ret = -ENOENT, srcu_idx;

	down_write(&uprobe->register_rwsem);

	srcu_idx = srcu_read_lock(&uprobes_srcu);
	list_for_each_entry_srcu(con, &uprobe->consumers, cons_node,
				 srcu_read_lock_held(&uprobes_srcu)) {
		if (con == uc) {
			ret = register_for_each_vma(uprobe, add ? uc : NULL);
			break;
		}
	}
	srcu_read_unlock(&uprobes_srcu, srcu_idx);

	up_write(&uprobe->register_rwsem);

	return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
	}
	read_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;
	unsigned long vaddr;
	int ret = 0, err = 0;

	mutex_lock(&delayed_uprobe_lock);
	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (du->mm != vma->vm_mm ||
		    !valid_ref_ctr_vma(du->uprobe, vma))
			continue;

		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
		if (ret) {
			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
			if (!err)
				err = ret;
		}
		delayed_uprobe_delete(du);
	}
	mutex_unlock(&delayed_uprobe_lock);
	return err;
}

/*
 * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events())
		return 0;

	if (vma->vm_file &&
	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
		delayed_ref_ctr_inc(vma);

	if (!valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	read_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(area->vaddr)) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&area->xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
 fail:
	mmap_write_unlock(mm);

	return ret;
}

void * __weak arch_uprobe_trampoline(unsigned long *psize)
{
	static uprobe_opcode_t insn = UPROBE_SWBP_INSN;

	*psize = UPROBE_SWBP_INSN_SIZE;
	return &insn;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long insns_size;
	struct xol_area *area;
	void *insns;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	insns = arch_uprobe_trampoline(&insns_size);
	arch_uprobe_copy_ixol(area->pages[0], 0, insns, insns_size);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(NULL, mm);
	mutex_unlock(&delayed_uprobe_lock);

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 *  - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
	struct return_instance *next = ri->next;
	put_uprobe(ri->uprobe);
	kfree(ri);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;

	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
		if (!n)
			return -ENOMEM;

		*n = *o;
		/*
		 * uprobe's refcnt has to be positive at this point, kept by
		 * utask->return_instances items; return_instances can't be
		 * removed right now, as task is blocked due to duping; so
		 * get_uprobe() is safe to use here.
		 */
		get_uprobe(n->uprobe);
		n->next = NULL;

		*p = n;
		p = &n->next;
		n_utask->depth++;
	}

	return 0;
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
			!fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
}

/*
 * The current area->vaddr notion assumes the trampoline address is always
 * equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
unsigned long uprobe_get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}

static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
					struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri);
		utask->depth--;
	}
	utask->return_instances = ri;
}

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	/* we need to bump refcount to store uprobe in utask */
	if (!try_get_uprobe(uprobe))
		return;

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		goto fail;

	trampoline_vaddr = uprobe_get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep the trampoline address on the stack, rather
	 * keep the original return address of the first caller through all
	 * the subsequent instances. This also makes breakpoint unwrapping
	 * easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto fail;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}
	ri->uprobe = uprobe;
	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;
	ri->next = utask->return_instances;
	utask->return_instances = ri;

	return;
 fail:
	kfree(ri);
	put_uprobe(uprobe);
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	if (!try_get_uprobe(uprobe))
		return -EINVAL;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr) {
		err = -ENOMEM;
		goto err_out;
	}

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		goto err_out;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
err_out:
	put_uprobe(uprobe);
	return err;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip).  This
 * is even more important if the task has a handler for SIGSEGV/etc, The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (task_sigpending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
		return -EINVAL;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

/* assumes being inside RCU protected region */
static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, bp_vaddr);
	if (vma) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe_rcu(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	mmap_read_unlock(mm);

	return uprobe;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;
	bool need_prep = false; /* prepare return uprobe, when needed */
	bool has_consumers = false;

	current->utask->auprobe = &uprobe->arch;

	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
				 srcu_read_lock_held(&uprobes_srcu)) {
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs);
			WARN(rc & ~UPROBE_HANDLER_MASK,
				"bad rc=0x%x from %ps()\n", rc, uc->handler);
		}

		if (uc->ret_handler)
			need_prep = true;

		remove &= rc;
		has_consumers = true;
	}
	current->utask->auprobe = NULL;

	if (need_prep && !remove)
		prepare_uretprobe(uprobe, regs); /* put bp at return */

	if (remove && has_consumers) {
		down_read(&uprobe->register_rwsem);

		/* re-check that removal is still required, this time under lock */
		if (!filter_chain(uprobe, current->mm)) {
			WARN_ON(!uprobe_is_active(uprobe));
			unapply_uprobe(uprobe, current->mm);
		}

		up_read(&uprobe->register_rwsem);
	}
}

static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
	struct uprobe *uprobe = ri->uprobe;
	struct uprobe_consumer *uc;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&uprobes_srcu);
	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
				 srcu_read_lock_held(&uprobes_srcu)) {
		if (uc->ret_handler)
			uc->ret_handler(uc, ri->func, regs);
	}
	srcu_read_unlock(&uprobes_srcu, srcu_idx);
}
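/*
 * Example (editor's sketch): a minimal consumer as seen by handler_chain()
 * and handle_uretprobe_chain() above. The callback signatures follow the
 * invocation sites in this file; how the consumer is attached
 * (uprobe_register() and friends) varies between kernel versions and is not
 * shown here.
 *
 *	static int demo_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_debug("uprobe hit at %lx\n", instruction_pointer(regs));
 *		return 0;	// keep the probe; UPROBE_HANDLER_REMOVE asks to unapply it
 *	}
 *
 *	static int demo_ret_handler(struct uprobe_consumer *uc,
 *				    unsigned long func, struct pt_regs *regs)
 *	{
 *		pr_debug("returning from %lx\n", func);	// func == entry IP (ri->func)
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer demo_consumer = {
 *		.handler	= demo_handler,
 *		.ret_handler	= demo_ret_handler,
 *	};
 */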
static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}

void uprobe_handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			/*
			 * Pop the current instance from the stack of pending
			 * return instances; it is not pending anymore: we just
			 * fixed up the original instruction pointer in regs and
			 * are about to call the handlers. This allows
			 * fixup_uretprobe_trampoline_entries() to properly fix
			 * up captured stack traces from uretprobe handlers, in
			 * which pending trampoline addresses on the stack are
			 * replaced with the correct original return addresses.
			 */
			utask->return_instances = ri->next;
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig(SIGILL);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				    struct pt_regs *regs)
{
	return true;
}
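/*
 * Example (editor's sketch): an architecture overrides the __weak defaults
 * above simply by providing strong definitions of its own. A hypothetical
 * port that wants to skip the handlers when a conditional instruction would
 * not execute could do, roughly (arch_insn_condition_holds() is a made-up
 * helper):
 *
 *	bool arch_uprobe_ignore(struct arch_uprobe *auprobe, struct pt_regs *regs)
 *	{
 *		return !arch_insn_condition_holds(auprobe, regs);
 *	}
 */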
/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int is_swbp, srcu_idx;

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == uprobe_get_trampoline_vaddr())
		return uprobe_handle_trampoline(regs);

	srcu_idx = srcu_read_lock(&uprobes_srcu);

	uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			force_sig(SIGTRAP);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		goto out;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/*
	 * Pairs with the smp_wmb() in prepare_uprobe().
	 *
	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
	 * we must also see the stores to &uprobe->arch performed by the
	 * prepare_uprobe() call.
	 */
	smp_rmb();

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (pre_ssout(uprobe, regs, bp_vaddr))
		goto out;

out:
	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
	srcu_read_unlock(&uprobes_srcu, srcu_idx);
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig(SIGILL);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}
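/*
 * Example (editor's note): the generic entry code consumes TIF_UPROBE on the
 * way back to user space; exit_to_user_mode_loop() in kernel/entry/common.c
 * does, approximately:
 *
 *	if (ti_work & _TIF_UPROBE)
 *		uprobe_notify_resume(regs);
 */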
/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

void __init uprobes_init(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	BUG_ON(register_die_notifier(&uprobe_exception_nb));
}
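/*
 * Example (editor's sketch, modeled loosely on the x86 implementation): the
 * architecture supplies arch_uprobe_exception_notify(), which
 * uprobe_exception_nb above points at. It typically maps the breakpoint and
 * single-step traps onto the two notifier hooks defined earlier in this
 * file, roughly:
 *
 *	int arch_uprobe_exception_notify(struct notifier_block *self,
 *					 unsigned long val, void *data)
 *	{
 *		struct die_args *args = data;
 *		struct pt_regs *regs = args->regs;
 *
 *		if (regs && !user_mode(regs))
 *			return NOTIFY_DONE;	// uprobes only fire on user-mode traps
 *
 *		switch (val) {
 *		case DIE_INT3:			// breakpoint instruction hit
 *			if (uprobe_pre_sstep_notifier(regs))
 *				return NOTIFY_STOP;
 *			break;
 *		case DIE_DEBUG:			// single-step completed
 *			if (uprobe_post_sstep_notifier(regs))
 *				return NOTIFY_STOP;
 *			break;
 *		default:
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 */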