1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * User-space Probes (UProbes) 4 * 5 * Copyright (C) IBM Corporation, 2008-2012 6 * Authors: 7 * Srikar Dronamraju 8 * Jim Keniston 9 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra 10 */ 11 12 #include <linux/kernel.h> 13 #include <linux/highmem.h> 14 #include <linux/pagemap.h> /* read_mapping_page */ 15 #include <linux/slab.h> 16 #include <linux/sched.h> 17 #include <linux/sched/mm.h> 18 #include <linux/sched/coredump.h> 19 #include <linux/export.h> 20 #include <linux/rmap.h> /* anon_vma_prepare */ 21 #include <linux/mmu_notifier.h> /* set_pte_at_notify */ 22 #include <linux/swap.h> /* folio_free_swap */ 23 #include <linux/ptrace.h> /* user_enable_single_step */ 24 #include <linux/kdebug.h> /* notifier mechanism */ 25 #include <linux/percpu-rwsem.h> 26 #include <linux/task_work.h> 27 #include <linux/shmem_fs.h> 28 #include <linux/khugepaged.h> 29 30 #include <linux/uprobes.h> 31 32 #define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES) 33 #define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE 34 35 static struct rb_root uprobes_tree = RB_ROOT; 36 /* 37 * allows us to skip the uprobe_mmap if there are no uprobe events active 38 * at this time. Probably a fine grained per inode count is better? 39 */ 40 #define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree) 41 42 static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */ 43 44 #define UPROBES_HASH_SZ 13 45 /* serialize uprobe->pending_list */ 46 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; 47 #define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ]) 48 49 DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem); 50 51 /* Have a copy of original instruction */ 52 #define UPROBE_COPY_INSN 0 53 54 struct uprobe { 55 struct rb_node rb_node; /* node in the rb tree */ 56 refcount_t ref; 57 struct rw_semaphore register_rwsem; 58 struct rw_semaphore consumer_rwsem; 59 struct list_head pending_list; 60 struct uprobe_consumer *consumers; 61 struct inode *inode; /* Also hold a ref to inode */ 62 loff_t offset; 63 loff_t ref_ctr_offset; 64 unsigned long flags; 65 66 /* 67 * The generic code assumes that it has two members of unknown type 68 * owned by the arch-specific code: 69 * 70 * insn - copy_insn() saves the original instruction here for 71 * arch_uprobe_analyze_insn(). 72 * 73 * ixol - potentially modified instruction to execute out of 74 * line, copied to xol_area by xol_get_insn_slot(). 75 */ 76 struct arch_uprobe arch; 77 }; 78 79 struct delayed_uprobe { 80 struct list_head list; 81 struct uprobe *uprobe; 82 struct mm_struct *mm; 83 }; 84 85 static DEFINE_MUTEX(delayed_uprobe_lock); 86 static LIST_HEAD(delayed_uprobe_list); 87 88 /* 89 * Execute out of line area: anonymous executable mapping installed 90 * by the probed task to execute the copy of the original instruction 91 * mangled by set_swbp(). 92 * 93 * On a breakpoint hit, thread contests for a slot. It frees the 94 * slot after singlestep. Currently a fixed number of slots are 95 * allocated. 96 */ 97 struct xol_area { 98 wait_queue_head_t wq; /* if all slots are busy */ 99 atomic_t slot_count; /* number of in-use slots */ 100 unsigned long *bitmap; /* 0 = free slot */ 101 102 struct vm_special_mapping xol_mapping; 103 struct page *pages[2]; 104 /* 105 * We keep the vma's vm_start rather than a pointer to the vma 106 * itself. The probed process or a naughty kernel module could make 107 * the vma go away, and we must handle that reasonably gracefully. 
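	 *
	 * Everything else about the area is rederived from ->vaddr below:
	 * xol_take_insn_slot() hands out slot addresses relative to it and
	 * get_trampoline_vaddr() simply returns it.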
108 */ 109 unsigned long vaddr; /* Page(s) of instruction slots */ 110 }; 111 112 /* 113 * valid_vma: Verify if the specified vma is an executable vma 114 * Relax restrictions while unregistering: vm_flags might have 115 * changed after breakpoint was inserted. 116 * - is_register: indicates if we are in register context. 117 * - Return 1 if the specified virtual address is in an 118 * executable vma. 119 */ 120 static bool valid_vma(struct vm_area_struct *vma, bool is_register) 121 { 122 vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE; 123 124 if (is_register) 125 flags |= VM_WRITE; 126 127 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; 128 } 129 130 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) 131 { 132 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 133 } 134 135 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) 136 { 137 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); 138 } 139 140 /** 141 * __replace_page - replace page in vma by new page. 142 * based on replace_page in mm/ksm.c 143 * 144 * @vma: vma that holds the pte pointing to page 145 * @addr: address the old @page is mapped at 146 * @old_page: the page we are replacing by new_page 147 * @new_page: the modified page we replace page by 148 * 149 * If @new_page is NULL, only unmap @old_page. 150 * 151 * Returns 0 on success, negative error code otherwise. 152 */ 153 static int __replace_page(struct vm_area_struct *vma, unsigned long addr, 154 struct page *old_page, struct page *new_page) 155 { 156 struct folio *old_folio = page_folio(old_page); 157 struct folio *new_folio; 158 struct mm_struct *mm = vma->vm_mm; 159 DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0); 160 int err; 161 struct mmu_notifier_range range; 162 163 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, 164 addr + PAGE_SIZE); 165 166 if (new_page) { 167 new_folio = page_folio(new_page); 168 err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL); 169 if (err) 170 return err; 171 } 172 173 /* For folio_free_swap() below */ 174 folio_lock(old_folio); 175 176 mmu_notifier_invalidate_range_start(&range); 177 err = -EAGAIN; 178 if (!page_vma_mapped_walk(&pvmw)) 179 goto unlock; 180 VM_BUG_ON_PAGE(addr != pvmw.address, old_page); 181 182 if (new_page) { 183 folio_get(new_folio); 184 page_add_new_anon_rmap(new_page, vma, addr); 185 folio_add_lru_vma(new_folio, vma); 186 } else 187 /* no new page, just dec_mm_counter for old_page */ 188 dec_mm_counter(mm, MM_ANONPAGES); 189 190 if (!folio_test_anon(old_folio)) { 191 dec_mm_counter(mm, mm_counter_file(old_page)); 192 inc_mm_counter(mm, MM_ANONPAGES); 193 } 194 195 flush_cache_page(vma, addr, pte_pfn(*pvmw.pte)); 196 ptep_clear_flush_notify(vma, addr, pvmw.pte); 197 if (new_page) 198 set_pte_at_notify(mm, addr, pvmw.pte, 199 mk_pte(new_page, vma->vm_page_prot)); 200 201 page_remove_rmap(old_page, vma, false); 202 if (!folio_mapped(old_folio)) 203 folio_free_swap(old_folio); 204 page_vma_mapped_walk_done(&pvmw); 205 folio_put(old_folio); 206 207 err = 0; 208 unlock: 209 mmu_notifier_invalidate_range_end(&range); 210 folio_unlock(old_folio); 211 return err; 212 } 213 214 /** 215 * is_swbp_insn - check if instruction is breakpoint instruction. 216 * @insn: instruction to be checked. 217 * Default implementation of is_swbp_insn 218 * Returns true if @insn is a breakpoint instruction. 
219 */ 220 bool __weak is_swbp_insn(uprobe_opcode_t *insn) 221 { 222 return *insn == UPROBE_SWBP_INSN; 223 } 224 225 /** 226 * is_trap_insn - check if instruction is breakpoint instruction. 227 * @insn: instruction to be checked. 228 * Default implementation of is_trap_insn 229 * Returns true if @insn is a breakpoint instruction. 230 * 231 * This function is needed for the case where an architecture has multiple 232 * trap instructions (like powerpc). 233 */ 234 bool __weak is_trap_insn(uprobe_opcode_t *insn) 235 { 236 return is_swbp_insn(insn); 237 } 238 239 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len) 240 { 241 void *kaddr = kmap_atomic(page); 242 memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len); 243 kunmap_atomic(kaddr); 244 } 245 246 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) 247 { 248 void *kaddr = kmap_atomic(page); 249 memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); 250 kunmap_atomic(kaddr); 251 } 252 253 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) 254 { 255 uprobe_opcode_t old_opcode; 256 bool is_swbp; 257 258 /* 259 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here. 260 * We do not check if it is any other 'trap variant' which could 261 * be conditional trap instruction such as the one powerpc supports. 262 * 263 * The logic is that we do not care if the underlying instruction 264 * is a trap variant; uprobes always wins over any other (gdb) 265 * breakpoint. 266 */ 267 copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE); 268 is_swbp = is_swbp_insn(&old_opcode); 269 270 if (is_swbp_insn(new_opcode)) { 271 if (is_swbp) /* register: already installed? */ 272 return 0; 273 } else { 274 if (!is_swbp) /* unregister: was it changed by us? 
*/ 275 return 0; 276 } 277 278 return 1; 279 } 280 281 static struct delayed_uprobe * 282 delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm) 283 { 284 struct delayed_uprobe *du; 285 286 list_for_each_entry(du, &delayed_uprobe_list, list) 287 if (du->uprobe == uprobe && du->mm == mm) 288 return du; 289 return NULL; 290 } 291 292 static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm) 293 { 294 struct delayed_uprobe *du; 295 296 if (delayed_uprobe_check(uprobe, mm)) 297 return 0; 298 299 du = kzalloc(sizeof(*du), GFP_KERNEL); 300 if (!du) 301 return -ENOMEM; 302 303 du->uprobe = uprobe; 304 du->mm = mm; 305 list_add(&du->list, &delayed_uprobe_list); 306 return 0; 307 } 308 309 static void delayed_uprobe_delete(struct delayed_uprobe *du) 310 { 311 if (WARN_ON(!du)) 312 return; 313 list_del(&du->list); 314 kfree(du); 315 } 316 317 static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm) 318 { 319 struct list_head *pos, *q; 320 struct delayed_uprobe *du; 321 322 if (!uprobe && !mm) 323 return; 324 325 list_for_each_safe(pos, q, &delayed_uprobe_list) { 326 du = list_entry(pos, struct delayed_uprobe, list); 327 328 if (uprobe && du->uprobe != uprobe) 329 continue; 330 if (mm && du->mm != mm) 331 continue; 332 333 delayed_uprobe_delete(du); 334 } 335 } 336 337 static bool valid_ref_ctr_vma(struct uprobe *uprobe, 338 struct vm_area_struct *vma) 339 { 340 unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset); 341 342 return uprobe->ref_ctr_offset && 343 vma->vm_file && 344 file_inode(vma->vm_file) == uprobe->inode && 345 (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && 346 vma->vm_start <= vaddr && 347 vma->vm_end > vaddr; 348 } 349 350 static struct vm_area_struct * 351 find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm) 352 { 353 VMA_ITERATOR(vmi, mm, 0); 354 struct vm_area_struct *tmp; 355 356 for_each_vma(vmi, tmp) 357 if (valid_ref_ctr_vma(uprobe, tmp)) 358 return tmp; 359 360 return NULL; 361 } 362 363 static int 364 __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) 365 { 366 void *kaddr; 367 struct page *page; 368 struct vm_area_struct *vma; 369 int ret; 370 short *ptr; 371 372 if (!vaddr || !d) 373 return -EINVAL; 374 375 ret = get_user_pages_remote(mm, vaddr, 1, 376 FOLL_WRITE, &page, &vma, NULL); 377 if (unlikely(ret <= 0)) { 378 /* 379 * We are asking for 1 page. If get_user_pages_remote() fails, 380 * it may return 0, in that case we have to return error. 381 */ 382 return ret == 0 ? -EBUSY : ret; 383 } 384 385 kaddr = kmap_atomic(page); 386 ptr = kaddr + (vaddr & ~PAGE_MASK); 387 388 if (unlikely(*ptr + d < 0)) { 389 pr_warn("ref_ctr going negative. vaddr: 0x%lx, " 390 "curr val: %d, delta: %d\n", vaddr, *ptr, d); 391 ret = -EINVAL; 392 goto out; 393 } 394 395 *ptr += d; 396 ret = 0; 397 out: 398 kunmap_atomic(kaddr); 399 put_page(page); 400 return ret; 401 } 402 403 static void update_ref_ctr_warn(struct uprobe *uprobe, 404 struct mm_struct *mm, short d) 405 { 406 pr_warn("ref_ctr %s failed for inode: 0x%lx offset: " 407 "0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n", 408 d > 0 ? 
"increment" : "decrement", uprobe->inode->i_ino, 409 (unsigned long long) uprobe->offset, 410 (unsigned long long) uprobe->ref_ctr_offset, mm); 411 } 412 413 static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm, 414 short d) 415 { 416 struct vm_area_struct *rc_vma; 417 unsigned long rc_vaddr; 418 int ret = 0; 419 420 rc_vma = find_ref_ctr_vma(uprobe, mm); 421 422 if (rc_vma) { 423 rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset); 424 ret = __update_ref_ctr(mm, rc_vaddr, d); 425 if (ret) 426 update_ref_ctr_warn(uprobe, mm, d); 427 428 if (d > 0) 429 return ret; 430 } 431 432 mutex_lock(&delayed_uprobe_lock); 433 if (d > 0) 434 ret = delayed_uprobe_add(uprobe, mm); 435 else 436 delayed_uprobe_remove(uprobe, mm); 437 mutex_unlock(&delayed_uprobe_lock); 438 439 return ret; 440 } 441 442 /* 443 * NOTE: 444 * Expect the breakpoint instruction to be the smallest size instruction for 445 * the architecture. If an arch has variable length instruction and the 446 * breakpoint instruction is not of the smallest length instruction 447 * supported by that architecture then we need to modify is_trap_at_addr and 448 * uprobe_write_opcode accordingly. This would never be a problem for archs 449 * that have fixed length instructions. 450 * 451 * uprobe_write_opcode - write the opcode at a given virtual address. 452 * @auprobe: arch specific probepoint information. 453 * @mm: the probed process address space. 454 * @vaddr: the virtual address to store the opcode. 455 * @opcode: opcode to be written at @vaddr. 456 * 457 * Called with mm->mmap_lock held for write. 458 * Return 0 (success) or a negative errno. 459 */ 460 int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, 461 unsigned long vaddr, uprobe_opcode_t opcode) 462 { 463 struct uprobe *uprobe; 464 struct page *old_page, *new_page; 465 struct vm_area_struct *vma; 466 int ret, is_register, ref_ctr_updated = 0; 467 bool orig_page_huge = false; 468 unsigned int gup_flags = FOLL_FORCE; 469 470 is_register = is_swbp_insn(&opcode); 471 uprobe = container_of(auprobe, struct uprobe, arch); 472 473 retry: 474 if (is_register) 475 gup_flags |= FOLL_SPLIT_PMD; 476 /* Read the page with vaddr into memory */ 477 ret = get_user_pages_remote(mm, vaddr, 1, gup_flags, 478 &old_page, &vma, NULL); 479 if (ret <= 0) 480 return ret; 481 482 ret = verify_opcode(old_page, vaddr, &opcode); 483 if (ret <= 0) 484 goto put_old; 485 486 if (WARN(!is_register && PageCompound(old_page), 487 "uprobe unregister should never work on compound page\n")) { 488 ret = -EINVAL; 489 goto put_old; 490 } 491 492 /* We are going to replace instruction, update ref_ctr. */ 493 if (!ref_ctr_updated && uprobe->ref_ctr_offset) { 494 ret = update_ref_ctr(uprobe, mm, is_register ? 
1 : -1); 495 if (ret) 496 goto put_old; 497 498 ref_ctr_updated = 1; 499 } 500 501 ret = 0; 502 if (!is_register && !PageAnon(old_page)) 503 goto put_old; 504 505 ret = anon_vma_prepare(vma); 506 if (ret) 507 goto put_old; 508 509 ret = -ENOMEM; 510 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); 511 if (!new_page) 512 goto put_old; 513 514 __SetPageUptodate(new_page); 515 copy_highpage(new_page, old_page); 516 copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); 517 518 if (!is_register) { 519 struct page *orig_page; 520 pgoff_t index; 521 522 VM_BUG_ON_PAGE(!PageAnon(old_page), old_page); 523 524 index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT; 525 orig_page = find_get_page(vma->vm_file->f_inode->i_mapping, 526 index); 527 528 if (orig_page) { 529 if (PageUptodate(orig_page) && 530 pages_identical(new_page, orig_page)) { 531 /* let go new_page */ 532 put_page(new_page); 533 new_page = NULL; 534 535 if (PageCompound(orig_page)) 536 orig_page_huge = true; 537 } 538 put_page(orig_page); 539 } 540 } 541 542 ret = __replace_page(vma, vaddr, old_page, new_page); 543 if (new_page) 544 put_page(new_page); 545 put_old: 546 put_page(old_page); 547 548 if (unlikely(ret == -EAGAIN)) 549 goto retry; 550 551 /* Revert back reference counter if instruction update failed. */ 552 if (ret && is_register && ref_ctr_updated) 553 update_ref_ctr(uprobe, mm, -1); 554 555 /* try collapse pmd for compound page */ 556 if (!ret && orig_page_huge) 557 collapse_pte_mapped_thp(mm, vaddr, false); 558 559 return ret; 560 } 561 562 /** 563 * set_swbp - store breakpoint at a given address. 564 * @auprobe: arch specific probepoint information. 565 * @mm: the probed process address space. 566 * @vaddr: the virtual address to insert the opcode. 567 * 568 * For mm @mm, store the breakpoint instruction at @vaddr. 569 * Return 0 (success) or a negative errno. 570 */ 571 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) 572 { 573 return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN); 574 } 575 576 /** 577 * set_orig_insn - Restore the original instruction. 578 * @mm: the probed process address space. 579 * @auprobe: arch specific probepoint information. 580 * @vaddr: the virtual address to insert the opcode. 581 * 582 * For mm @mm, restore the original opcode (opcode) at @vaddr. 583 * Return 0 (success) or a negative errno. 584 */ 585 int __weak 586 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) 587 { 588 return uprobe_write_opcode(auprobe, mm, vaddr, 589 *(uprobe_opcode_t *)&auprobe->insn); 590 } 591 592 static struct uprobe *get_uprobe(struct uprobe *uprobe) 593 { 594 refcount_inc(&uprobe->ref); 595 return uprobe; 596 } 597 598 static void put_uprobe(struct uprobe *uprobe) 599 { 600 if (refcount_dec_and_test(&uprobe->ref)) { 601 /* 602 * If application munmap(exec_vma) before uprobe_unregister() 603 * gets called, we don't get a chance to remove uprobe from 604 * delayed_uprobe_list from remove_breakpoint(). Do it here. 
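		 * Passing a NULL mm to delayed_uprobe_remove() purges every
		 * pending entry for this uprobe, whichever mm queued it.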
605 */ 606 mutex_lock(&delayed_uprobe_lock); 607 delayed_uprobe_remove(uprobe, NULL); 608 mutex_unlock(&delayed_uprobe_lock); 609 kfree(uprobe); 610 } 611 } 612 613 static __always_inline 614 int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset, 615 const struct uprobe *r) 616 { 617 if (l_inode < r->inode) 618 return -1; 619 620 if (l_inode > r->inode) 621 return 1; 622 623 if (l_offset < r->offset) 624 return -1; 625 626 if (l_offset > r->offset) 627 return 1; 628 629 return 0; 630 } 631 632 #define __node_2_uprobe(node) \ 633 rb_entry((node), struct uprobe, rb_node) 634 635 struct __uprobe_key { 636 struct inode *inode; 637 loff_t offset; 638 }; 639 640 static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b) 641 { 642 const struct __uprobe_key *a = key; 643 return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b)); 644 } 645 646 static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b) 647 { 648 struct uprobe *u = __node_2_uprobe(a); 649 return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b)); 650 } 651 652 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset) 653 { 654 struct __uprobe_key key = { 655 .inode = inode, 656 .offset = offset, 657 }; 658 struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key); 659 660 if (node) 661 return get_uprobe(__node_2_uprobe(node)); 662 663 return NULL; 664 } 665 666 /* 667 * Find a uprobe corresponding to a given inode:offset 668 * Acquires uprobes_treelock 669 */ 670 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset) 671 { 672 struct uprobe *uprobe; 673 674 spin_lock(&uprobes_treelock); 675 uprobe = __find_uprobe(inode, offset); 676 spin_unlock(&uprobes_treelock); 677 678 return uprobe; 679 } 680 681 static struct uprobe *__insert_uprobe(struct uprobe *uprobe) 682 { 683 struct rb_node *node; 684 685 node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp); 686 if (node) 687 return get_uprobe(__node_2_uprobe(node)); 688 689 /* get access + creation ref */ 690 refcount_set(&uprobe->ref, 2); 691 return NULL; 692 } 693 694 /* 695 * Acquire uprobes_treelock. 696 * Matching uprobe already exists in rbtree; 697 * increment (access refcount) and return the matching uprobe. 698 * 699 * No matching uprobe; insert the uprobe in rb_tree; 700 * get a double refcount (access + creation) and return NULL. 701 */ 702 static struct uprobe *insert_uprobe(struct uprobe *uprobe) 703 { 704 struct uprobe *u; 705 706 spin_lock(&uprobes_treelock); 707 u = __insert_uprobe(uprobe); 708 spin_unlock(&uprobes_treelock); 709 710 return u; 711 } 712 713 static void 714 ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe) 715 { 716 pr_warn("ref_ctr_offset mismatch. 
inode: 0x%lx offset: 0x%llx " 717 "ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n", 718 uprobe->inode->i_ino, (unsigned long long) uprobe->offset, 719 (unsigned long long) cur_uprobe->ref_ctr_offset, 720 (unsigned long long) uprobe->ref_ctr_offset); 721 } 722 723 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, 724 loff_t ref_ctr_offset) 725 { 726 struct uprobe *uprobe, *cur_uprobe; 727 728 uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL); 729 if (!uprobe) 730 return NULL; 731 732 uprobe->inode = inode; 733 uprobe->offset = offset; 734 uprobe->ref_ctr_offset = ref_ctr_offset; 735 init_rwsem(&uprobe->register_rwsem); 736 init_rwsem(&uprobe->consumer_rwsem); 737 738 /* add to uprobes_tree, sorted on inode:offset */ 739 cur_uprobe = insert_uprobe(uprobe); 740 /* a uprobe exists for this inode:offset combination */ 741 if (cur_uprobe) { 742 if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) { 743 ref_ctr_mismatch_warn(cur_uprobe, uprobe); 744 put_uprobe(cur_uprobe); 745 kfree(uprobe); 746 return ERR_PTR(-EINVAL); 747 } 748 kfree(uprobe); 749 uprobe = cur_uprobe; 750 } 751 752 return uprobe; 753 } 754 755 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) 756 { 757 down_write(&uprobe->consumer_rwsem); 758 uc->next = uprobe->consumers; 759 uprobe->consumers = uc; 760 up_write(&uprobe->consumer_rwsem); 761 } 762 763 /* 764 * For uprobe @uprobe, delete the consumer @uc. 765 * Return true if the @uc is deleted successfully 766 * or return false. 767 */ 768 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc) 769 { 770 struct uprobe_consumer **con; 771 bool ret = false; 772 773 down_write(&uprobe->consumer_rwsem); 774 for (con = &uprobe->consumers; *con; con = &(*con)->next) { 775 if (*con == uc) { 776 *con = uc->next; 777 ret = true; 778 break; 779 } 780 } 781 up_write(&uprobe->consumer_rwsem); 782 783 return ret; 784 } 785 786 static int __copy_insn(struct address_space *mapping, struct file *filp, 787 void *insn, int nbytes, loff_t offset) 788 { 789 struct page *page; 790 /* 791 * Ensure that the page that has the original instruction is populated 792 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(), 793 * see uprobe_register(). 794 */ 795 if (mapping->a_ops->read_folio) 796 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp); 797 else 798 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 799 if (IS_ERR(page)) 800 return PTR_ERR(page); 801 802 copy_from_page(page, offset, insn, nbytes); 803 put_page(page); 804 805 return 0; 806 } 807 808 static int copy_insn(struct uprobe *uprobe, struct file *filp) 809 { 810 struct address_space *mapping = uprobe->inode->i_mapping; 811 loff_t offs = uprobe->offset; 812 void *insn = &uprobe->arch.insn; 813 int size = sizeof(uprobe->arch.insn); 814 int len, err = -EIO; 815 816 /* Copy only available bytes, -EIO if nothing was read */ 817 do { 818 if (offs >= i_size_read(uprobe->inode)) 819 break; 820 821 len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK)); 822 err = __copy_insn(mapping, filp, insn, len, offs); 823 if (err) 824 break; 825 826 insn += len; 827 offs += len; 828 size -= len; 829 } while (size); 830 831 return err; 832 } 833 834 static int prepare_uprobe(struct uprobe *uprobe, struct file *file, 835 struct mm_struct *mm, unsigned long vaddr) 836 { 837 int ret = 0; 838 839 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) 840 return ret; 841 842 /* TODO: move this into _register, until then we abuse this sem. 
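	 *
	 * UPROBE_COPY_INSN is tested twice: locklessly above and again under
	 * ->consumer_rwsem below, so only one task copies and analyzes the
	 * instruction. The smp_wmb() before set_bit() pairs with the
	 * smp_rmb() in handle_swbp(): whoever observes the bit also observes
	 * the fully initialized uprobe->arch.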
*/ 843 down_write(&uprobe->consumer_rwsem); 844 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) 845 goto out; 846 847 ret = copy_insn(uprobe, file); 848 if (ret) 849 goto out; 850 851 ret = -ENOTSUPP; 852 if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn)) 853 goto out; 854 855 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); 856 if (ret) 857 goto out; 858 859 smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */ 860 set_bit(UPROBE_COPY_INSN, &uprobe->flags); 861 862 out: 863 up_write(&uprobe->consumer_rwsem); 864 865 return ret; 866 } 867 868 static inline bool consumer_filter(struct uprobe_consumer *uc, 869 enum uprobe_filter_ctx ctx, struct mm_struct *mm) 870 { 871 return !uc->filter || uc->filter(uc, ctx, mm); 872 } 873 874 static bool filter_chain(struct uprobe *uprobe, 875 enum uprobe_filter_ctx ctx, struct mm_struct *mm) 876 { 877 struct uprobe_consumer *uc; 878 bool ret = false; 879 880 down_read(&uprobe->consumer_rwsem); 881 for (uc = uprobe->consumers; uc; uc = uc->next) { 882 ret = consumer_filter(uc, ctx, mm); 883 if (ret) 884 break; 885 } 886 up_read(&uprobe->consumer_rwsem); 887 888 return ret; 889 } 890 891 static int 892 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, 893 struct vm_area_struct *vma, unsigned long vaddr) 894 { 895 bool first_uprobe; 896 int ret; 897 898 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); 899 if (ret) 900 return ret; 901 902 /* 903 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(), 904 * the task can hit this breakpoint right after __replace_page(). 905 */ 906 first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags); 907 if (first_uprobe) 908 set_bit(MMF_HAS_UPROBES, &mm->flags); 909 910 ret = set_swbp(&uprobe->arch, mm, vaddr); 911 if (!ret) 912 clear_bit(MMF_RECALC_UPROBES, &mm->flags); 913 else if (first_uprobe) 914 clear_bit(MMF_HAS_UPROBES, &mm->flags); 915 916 return ret; 917 } 918 919 static int 920 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) 921 { 922 set_bit(MMF_RECALC_UPROBES, &mm->flags); 923 return set_orig_insn(&uprobe->arch, mm, vaddr); 924 } 925 926 static inline bool uprobe_is_active(struct uprobe *uprobe) 927 { 928 return !RB_EMPTY_NODE(&uprobe->rb_node); 929 } 930 /* 931 * There could be threads that have already hit the breakpoint. They 932 * will recheck the current insn and restart if find_uprobe() fails. 933 * See find_active_uprobe(). 
934 */ 935 static void delete_uprobe(struct uprobe *uprobe) 936 { 937 if (WARN_ON(!uprobe_is_active(uprobe))) 938 return; 939 940 spin_lock(&uprobes_treelock); 941 rb_erase(&uprobe->rb_node, &uprobes_tree); 942 spin_unlock(&uprobes_treelock); 943 RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */ 944 put_uprobe(uprobe); 945 } 946 947 struct map_info { 948 struct map_info *next; 949 struct mm_struct *mm; 950 unsigned long vaddr; 951 }; 952 953 static inline struct map_info *free_map_info(struct map_info *info) 954 { 955 struct map_info *next = info->next; 956 kfree(info); 957 return next; 958 } 959 960 static struct map_info * 961 build_map_info(struct address_space *mapping, loff_t offset, bool is_register) 962 { 963 unsigned long pgoff = offset >> PAGE_SHIFT; 964 struct vm_area_struct *vma; 965 struct map_info *curr = NULL; 966 struct map_info *prev = NULL; 967 struct map_info *info; 968 int more = 0; 969 970 again: 971 i_mmap_lock_read(mapping); 972 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 973 if (!valid_vma(vma, is_register)) 974 continue; 975 976 if (!prev && !more) { 977 /* 978 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through 979 * reclaim. This is optimistic, no harm done if it fails. 980 */ 981 prev = kmalloc(sizeof(struct map_info), 982 GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN); 983 if (prev) 984 prev->next = NULL; 985 } 986 if (!prev) { 987 more++; 988 continue; 989 } 990 991 if (!mmget_not_zero(vma->vm_mm)) 992 continue; 993 994 info = prev; 995 prev = prev->next; 996 info->next = curr; 997 curr = info; 998 999 info->mm = vma->vm_mm; 1000 info->vaddr = offset_to_vaddr(vma, offset); 1001 } 1002 i_mmap_unlock_read(mapping); 1003 1004 if (!more) 1005 goto out; 1006 1007 prev = curr; 1008 while (curr) { 1009 mmput(curr->mm); 1010 curr = curr->next; 1011 } 1012 1013 do { 1014 info = kmalloc(sizeof(struct map_info), GFP_KERNEL); 1015 if (!info) { 1016 curr = ERR_PTR(-ENOMEM); 1017 goto out; 1018 } 1019 info->next = prev; 1020 prev = info; 1021 } while (--more); 1022 1023 goto again; 1024 out: 1025 while (prev) 1026 prev = free_map_info(prev); 1027 return curr; 1028 } 1029 1030 static int 1031 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) 1032 { 1033 bool is_register = !!new; 1034 struct map_info *info; 1035 int err = 0; 1036 1037 percpu_down_write(&dup_mmap_sem); 1038 info = build_map_info(uprobe->inode->i_mapping, 1039 uprobe->offset, is_register); 1040 if (IS_ERR(info)) { 1041 err = PTR_ERR(info); 1042 goto out; 1043 } 1044 1045 while (info) { 1046 struct mm_struct *mm = info->mm; 1047 struct vm_area_struct *vma; 1048 1049 if (err && is_register) 1050 goto free; 1051 1052 mmap_write_lock(mm); 1053 vma = find_vma(mm, info->vaddr); 1054 if (!vma || !valid_vma(vma, is_register) || 1055 file_inode(vma->vm_file) != uprobe->inode) 1056 goto unlock; 1057 1058 if (vma->vm_start > info->vaddr || 1059 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) 1060 goto unlock; 1061 1062 if (is_register) { 1063 /* consult only the "caller", new consumer. 
 */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static void
__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO: can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

/*
 * __uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, __uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of __uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return errno if it cannot successfully install probes,
 * else return 0 (success).
 */
static int __uprobe_register(struct inode *inode, loff_t offset,
			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->read_folio &&
	    !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
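	 *
	 * (An offset aligned to UPROBE_SWBP_INSN_SIZE cannot straddle a page
	 * as long as PAGE_SIZE is a multiple of that size, which holds for
	 * the supported breakpoint widths; the same argument covers the
	 * sizeof(short) alignment of the reference counter.)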
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (!uprobe)
		return -ENOMEM;
	if (IS_ERR(uprobe))
		return PTR_ERR(uprobe);

	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		consumer_add(uprobe, uc);
		ret = register_for_each_vma(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

int uprobe_register(struct inode *inode, loff_t offset,
		    struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, 0, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register);

int uprobe_register_refctr(struct inode *inode, loff_t offset,
			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register_refctr);

/*
 * uprobe_apply - add or remove the breakpoints of an already registered probe.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
		 struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
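 *
 * find_node_in_range() returns *some* node inside [min, max]; the rb_prev()
 * and rb_next() walks below then collect every uprobe of this inode whose
 * offset falls in the range, taking a ref on each entry so it survives until
 * uprobe_mmap() drops it.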
1289 */ 1290 static void build_probe_list(struct inode *inode, 1291 struct vm_area_struct *vma, 1292 unsigned long start, unsigned long end, 1293 struct list_head *head) 1294 { 1295 loff_t min, max; 1296 struct rb_node *n, *t; 1297 struct uprobe *u; 1298 1299 INIT_LIST_HEAD(head); 1300 min = vaddr_to_offset(vma, start); 1301 max = min + (end - start) - 1; 1302 1303 spin_lock(&uprobes_treelock); 1304 n = find_node_in_range(inode, min, max); 1305 if (n) { 1306 for (t = n; t; t = rb_prev(t)) { 1307 u = rb_entry(t, struct uprobe, rb_node); 1308 if (u->inode != inode || u->offset < min) 1309 break; 1310 list_add(&u->pending_list, head); 1311 get_uprobe(u); 1312 } 1313 for (t = n; (t = rb_next(t)); ) { 1314 u = rb_entry(t, struct uprobe, rb_node); 1315 if (u->inode != inode || u->offset > max) 1316 break; 1317 list_add(&u->pending_list, head); 1318 get_uprobe(u); 1319 } 1320 } 1321 spin_unlock(&uprobes_treelock); 1322 } 1323 1324 /* @vma contains reference counter, not the probed instruction. */ 1325 static int delayed_ref_ctr_inc(struct vm_area_struct *vma) 1326 { 1327 struct list_head *pos, *q; 1328 struct delayed_uprobe *du; 1329 unsigned long vaddr; 1330 int ret = 0, err = 0; 1331 1332 mutex_lock(&delayed_uprobe_lock); 1333 list_for_each_safe(pos, q, &delayed_uprobe_list) { 1334 du = list_entry(pos, struct delayed_uprobe, list); 1335 1336 if (du->mm != vma->vm_mm || 1337 !valid_ref_ctr_vma(du->uprobe, vma)) 1338 continue; 1339 1340 vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); 1341 ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); 1342 if (ret) { 1343 update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); 1344 if (!err) 1345 err = ret; 1346 } 1347 delayed_uprobe_delete(du); 1348 } 1349 mutex_unlock(&delayed_uprobe_lock); 1350 return err; 1351 } 1352 1353 /* 1354 * Called from mmap_region/vma_merge with mm->mmap_lock acquired. 1355 * 1356 * Currently we ignore all errors and always return 0, the callers 1357 * can't handle the failure anyway. 1358 */ 1359 int uprobe_mmap(struct vm_area_struct *vma) 1360 { 1361 struct list_head tmp_list; 1362 struct uprobe *uprobe, *u; 1363 struct inode *inode; 1364 1365 if (no_uprobe_events()) 1366 return 0; 1367 1368 if (vma->vm_file && 1369 (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && 1370 test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags)) 1371 delayed_ref_ctr_inc(vma); 1372 1373 if (!valid_vma(vma, true)) 1374 return 0; 1375 1376 inode = file_inode(vma->vm_file); 1377 if (!inode) 1378 return 0; 1379 1380 mutex_lock(uprobes_mmap_hash(inode)); 1381 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); 1382 /* 1383 * We can race with uprobe_unregister(), this uprobe can be already 1384 * removed. But in this case filter_chain() must return false, all 1385 * consumers have gone away. 
1386 */ 1387 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { 1388 if (!fatal_signal_pending(current) && 1389 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { 1390 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); 1391 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); 1392 } 1393 put_uprobe(uprobe); 1394 } 1395 mutex_unlock(uprobes_mmap_hash(inode)); 1396 1397 return 0; 1398 } 1399 1400 static bool 1401 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) 1402 { 1403 loff_t min, max; 1404 struct inode *inode; 1405 struct rb_node *n; 1406 1407 inode = file_inode(vma->vm_file); 1408 1409 min = vaddr_to_offset(vma, start); 1410 max = min + (end - start) - 1; 1411 1412 spin_lock(&uprobes_treelock); 1413 n = find_node_in_range(inode, min, max); 1414 spin_unlock(&uprobes_treelock); 1415 1416 return !!n; 1417 } 1418 1419 /* 1420 * Called in context of a munmap of a vma. 1421 */ 1422 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) 1423 { 1424 if (no_uprobe_events() || !valid_vma(vma, false)) 1425 return; 1426 1427 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ 1428 return; 1429 1430 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || 1431 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) 1432 return; 1433 1434 if (vma_has_uprobes(vma, start, end)) 1435 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); 1436 } 1437 1438 /* Slot allocation for XOL */ 1439 static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) 1440 { 1441 struct vm_area_struct *vma; 1442 int ret; 1443 1444 if (mmap_write_lock_killable(mm)) 1445 return -EINTR; 1446 1447 if (mm->uprobes_state.xol_area) { 1448 ret = -EALREADY; 1449 goto fail; 1450 } 1451 1452 if (!area->vaddr) { 1453 /* Try to map as high as possible, this is only a hint. 
*/ 1454 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, 1455 PAGE_SIZE, 0, 0); 1456 if (IS_ERR_VALUE(area->vaddr)) { 1457 ret = area->vaddr; 1458 goto fail; 1459 } 1460 } 1461 1462 vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, 1463 VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, 1464 &area->xol_mapping); 1465 if (IS_ERR(vma)) { 1466 ret = PTR_ERR(vma); 1467 goto fail; 1468 } 1469 1470 ret = 0; 1471 /* pairs with get_xol_area() */ 1472 smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */ 1473 fail: 1474 mmap_write_unlock(mm); 1475 1476 return ret; 1477 } 1478 1479 static struct xol_area *__create_xol_area(unsigned long vaddr) 1480 { 1481 struct mm_struct *mm = current->mm; 1482 uprobe_opcode_t insn = UPROBE_SWBP_INSN; 1483 struct xol_area *area; 1484 1485 area = kmalloc(sizeof(*area), GFP_KERNEL); 1486 if (unlikely(!area)) 1487 goto out; 1488 1489 area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long), 1490 GFP_KERNEL); 1491 if (!area->bitmap) 1492 goto free_area; 1493 1494 area->xol_mapping.name = "[uprobes]"; 1495 area->xol_mapping.fault = NULL; 1496 area->xol_mapping.pages = area->pages; 1497 area->pages[0] = alloc_page(GFP_HIGHUSER); 1498 if (!area->pages[0]) 1499 goto free_bitmap; 1500 area->pages[1] = NULL; 1501 1502 area->vaddr = vaddr; 1503 init_waitqueue_head(&area->wq); 1504 /* Reserve the 1st slot for get_trampoline_vaddr() */ 1505 set_bit(0, area->bitmap); 1506 atomic_set(&area->slot_count, 1); 1507 arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE); 1508 1509 if (!xol_add_vma(mm, area)) 1510 return area; 1511 1512 __free_page(area->pages[0]); 1513 free_bitmap: 1514 kfree(area->bitmap); 1515 free_area: 1516 kfree(area); 1517 out: 1518 return NULL; 1519 } 1520 1521 /* 1522 * get_xol_area - Allocate process's xol_area if necessary. 1523 * This area will be used for storing instructions for execution out of line. 1524 * 1525 * Returns the allocated area or NULL. 1526 */ 1527 static struct xol_area *get_xol_area(void) 1528 { 1529 struct mm_struct *mm = current->mm; 1530 struct xol_area *area; 1531 1532 if (!mm->uprobes_state.xol_area) 1533 __create_xol_area(0); 1534 1535 /* Pairs with xol_add_vma() smp_store_release() */ 1536 area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */ 1537 return area; 1538 } 1539 1540 /* 1541 * uprobe_clear_state - Free the area allocated for slots. 1542 */ 1543 void uprobe_clear_state(struct mm_struct *mm) 1544 { 1545 struct xol_area *area = mm->uprobes_state.xol_area; 1546 1547 mutex_lock(&delayed_uprobe_lock); 1548 delayed_uprobe_remove(NULL, mm); 1549 mutex_unlock(&delayed_uprobe_lock); 1550 1551 if (!area) 1552 return; 1553 1554 put_page(area->pages[0]); 1555 kfree(area->bitmap); 1556 kfree(area); 1557 } 1558 1559 void uprobe_start_dup_mmap(void) 1560 { 1561 percpu_down_read(&dup_mmap_sem); 1562 } 1563 1564 void uprobe_end_dup_mmap(void) 1565 { 1566 percpu_up_read(&dup_mmap_sem); 1567 } 1568 1569 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) 1570 { 1571 if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { 1572 set_bit(MMF_HAS_UPROBES, &newmm->flags); 1573 /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ 1574 set_bit(MMF_RECALC_UPROBES, &newmm->flags); 1575 } 1576 } 1577 1578 /* 1579 * - search for a free slot. 
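 * - find_first_zero_bit() picks a candidate, test_and_set_bit() claims it;
 *   losing the race simply retries.
 * - when all slots are busy, sleep on area->wq until xol_free_insn_slot()
 *   clears a bit and wakes the queue.
 * - the returned address is area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES.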
1580 */ 1581 static unsigned long xol_take_insn_slot(struct xol_area *area) 1582 { 1583 unsigned long slot_addr; 1584 int slot_nr; 1585 1586 do { 1587 slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); 1588 if (slot_nr < UINSNS_PER_PAGE) { 1589 if (!test_and_set_bit(slot_nr, area->bitmap)) 1590 break; 1591 1592 slot_nr = UINSNS_PER_PAGE; 1593 continue; 1594 } 1595 wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE)); 1596 } while (slot_nr >= UINSNS_PER_PAGE); 1597 1598 slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); 1599 atomic_inc(&area->slot_count); 1600 1601 return slot_addr; 1602 } 1603 1604 /* 1605 * xol_get_insn_slot - allocate a slot for xol. 1606 * Returns the allocated slot address or 0. 1607 */ 1608 static unsigned long xol_get_insn_slot(struct uprobe *uprobe) 1609 { 1610 struct xol_area *area; 1611 unsigned long xol_vaddr; 1612 1613 area = get_xol_area(); 1614 if (!area) 1615 return 0; 1616 1617 xol_vaddr = xol_take_insn_slot(area); 1618 if (unlikely(!xol_vaddr)) 1619 return 0; 1620 1621 arch_uprobe_copy_ixol(area->pages[0], xol_vaddr, 1622 &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); 1623 1624 return xol_vaddr; 1625 } 1626 1627 /* 1628 * xol_free_insn_slot - If slot was earlier allocated by 1629 * @xol_get_insn_slot(), make the slot available for 1630 * subsequent requests. 1631 */ 1632 static void xol_free_insn_slot(struct task_struct *tsk) 1633 { 1634 struct xol_area *area; 1635 unsigned long vma_end; 1636 unsigned long slot_addr; 1637 1638 if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask) 1639 return; 1640 1641 slot_addr = tsk->utask->xol_vaddr; 1642 if (unlikely(!slot_addr)) 1643 return; 1644 1645 area = tsk->mm->uprobes_state.xol_area; 1646 vma_end = area->vaddr + PAGE_SIZE; 1647 if (area->vaddr <= slot_addr && slot_addr < vma_end) { 1648 unsigned long offset; 1649 int slot_nr; 1650 1651 offset = slot_addr - area->vaddr; 1652 slot_nr = offset / UPROBE_XOL_SLOT_BYTES; 1653 if (slot_nr >= UINSNS_PER_PAGE) 1654 return; 1655 1656 clear_bit(slot_nr, area->bitmap); 1657 atomic_dec(&area->slot_count); 1658 smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ 1659 if (waitqueue_active(&area->wq)) 1660 wake_up(&area->wq); 1661 1662 tsk->utask->xol_vaddr = 0; 1663 } 1664 } 1665 1666 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, 1667 void *src, unsigned long len) 1668 { 1669 /* Initialize the slot */ 1670 copy_to_page(page, vaddr, src, len); 1671 1672 /* 1673 * We probably need flush_icache_user_page() but it needs vma. 1674 * This should work on most of architectures by default. If 1675 * architecture needs to do something different it can define 1676 * its own version of the function. 1677 */ 1678 flush_dcache_page(page); 1679 } 1680 1681 /** 1682 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs 1683 * @regs: Reflects the saved state of the task after it has hit a breakpoint 1684 * instruction. 1685 * Return the address of the breakpoint instruction. 
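 *
 * The generic version assumes the breakpoint exception leaves the
 * instruction pointer just past the trap, hence the subtraction of
 * UPROBE_SWBP_INSN_SIZE; an architecture where that is not the case can
 * override this __weak helper.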
1686 */ 1687 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) 1688 { 1689 return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; 1690 } 1691 1692 unsigned long uprobe_get_trap_addr(struct pt_regs *regs) 1693 { 1694 struct uprobe_task *utask = current->utask; 1695 1696 if (unlikely(utask && utask->active_uprobe)) 1697 return utask->vaddr; 1698 1699 return instruction_pointer(regs); 1700 } 1701 1702 static struct return_instance *free_ret_instance(struct return_instance *ri) 1703 { 1704 struct return_instance *next = ri->next; 1705 put_uprobe(ri->uprobe); 1706 kfree(ri); 1707 return next; 1708 } 1709 1710 /* 1711 * Called with no locks held. 1712 * Called in context of an exiting or an exec-ing thread. 1713 */ 1714 void uprobe_free_utask(struct task_struct *t) 1715 { 1716 struct uprobe_task *utask = t->utask; 1717 struct return_instance *ri; 1718 1719 if (!utask) 1720 return; 1721 1722 if (utask->active_uprobe) 1723 put_uprobe(utask->active_uprobe); 1724 1725 ri = utask->return_instances; 1726 while (ri) 1727 ri = free_ret_instance(ri); 1728 1729 xol_free_insn_slot(t); 1730 kfree(utask); 1731 t->utask = NULL; 1732 } 1733 1734 /* 1735 * Allocate a uprobe_task object for the task if necessary. 1736 * Called when the thread hits a breakpoint. 1737 * 1738 * Returns: 1739 * - pointer to new uprobe_task on success 1740 * - NULL otherwise 1741 */ 1742 static struct uprobe_task *get_utask(void) 1743 { 1744 if (!current->utask) 1745 current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); 1746 return current->utask; 1747 } 1748 1749 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) 1750 { 1751 struct uprobe_task *n_utask; 1752 struct return_instance **p, *o, *n; 1753 1754 n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); 1755 if (!n_utask) 1756 return -ENOMEM; 1757 t->utask = n_utask; 1758 1759 p = &n_utask->return_instances; 1760 for (o = o_utask->return_instances; o; o = o->next) { 1761 n = kmalloc(sizeof(struct return_instance), GFP_KERNEL); 1762 if (!n) 1763 return -ENOMEM; 1764 1765 *n = *o; 1766 get_uprobe(n->uprobe); 1767 n->next = NULL; 1768 1769 *p = n; 1770 p = &n->next; 1771 n_utask->depth++; 1772 } 1773 1774 return 0; 1775 } 1776 1777 static void uprobe_warn(struct task_struct *t, const char *msg) 1778 { 1779 pr_warn("uprobe: %s:%d failed to %s\n", 1780 current->comm, current->pid, msg); 1781 } 1782 1783 static void dup_xol_work(struct callback_head *work) 1784 { 1785 if (current->flags & PF_EXITING) 1786 return; 1787 1788 if (!__create_xol_area(current->utask->dup_xol_addr) && 1789 !fatal_signal_pending(current)) 1790 uprobe_warn(current, "dup xol area"); 1791 } 1792 1793 /* 1794 * Called in context of a new clone/fork from copy_process. 
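 *
 * A new thread on the same mm (and not vforked) needs nothing here: it
 * shares the parent's xol area and will not unwind through the parent's
 * hijacked return addresses. Otherwise the parent's return_instances are
 * duplicated for @t and, for a brand new mm, the xol vma is recreated
 * lazily via dup_xol_work() queued as task_work.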
1795 */ 1796 void uprobe_copy_process(struct task_struct *t, unsigned long flags) 1797 { 1798 struct uprobe_task *utask = current->utask; 1799 struct mm_struct *mm = current->mm; 1800 struct xol_area *area; 1801 1802 t->utask = NULL; 1803 1804 if (!utask || !utask->return_instances) 1805 return; 1806 1807 if (mm == t->mm && !(flags & CLONE_VFORK)) 1808 return; 1809 1810 if (dup_utask(t, utask)) 1811 return uprobe_warn(t, "dup ret instances"); 1812 1813 /* The task can fork() after dup_xol_work() fails */ 1814 area = mm->uprobes_state.xol_area; 1815 if (!area) 1816 return uprobe_warn(t, "dup xol area"); 1817 1818 if (mm == t->mm) 1819 return; 1820 1821 t->utask->dup_xol_addr = area->vaddr; 1822 init_task_work(&t->utask->dup_xol_work, dup_xol_work); 1823 task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME); 1824 } 1825 1826 /* 1827 * Current area->vaddr notion assume the trampoline address is always 1828 * equal area->vaddr. 1829 * 1830 * Returns -1 in case the xol_area is not allocated. 1831 */ 1832 static unsigned long get_trampoline_vaddr(void) 1833 { 1834 struct xol_area *area; 1835 unsigned long trampoline_vaddr = -1; 1836 1837 /* Pairs with xol_add_vma() smp_store_release() */ 1838 area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ 1839 if (area) 1840 trampoline_vaddr = area->vaddr; 1841 1842 return trampoline_vaddr; 1843 } 1844 1845 static void cleanup_return_instances(struct uprobe_task *utask, bool chained, 1846 struct pt_regs *regs) 1847 { 1848 struct return_instance *ri = utask->return_instances; 1849 enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; 1850 1851 while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { 1852 ri = free_ret_instance(ri); 1853 utask->depth--; 1854 } 1855 utask->return_instances = ri; 1856 } 1857 1858 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) 1859 { 1860 struct return_instance *ri; 1861 struct uprobe_task *utask; 1862 unsigned long orig_ret_vaddr, trampoline_vaddr; 1863 bool chained; 1864 1865 if (!get_xol_area()) 1866 return; 1867 1868 utask = get_utask(); 1869 if (!utask) 1870 return; 1871 1872 if (utask->depth >= MAX_URETPROBE_DEPTH) { 1873 printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" 1874 " nestedness limit pid/tgid=%d/%d\n", 1875 current->pid, current->tgid); 1876 return; 1877 } 1878 1879 ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL); 1880 if (!ri) 1881 return; 1882 1883 trampoline_vaddr = get_trampoline_vaddr(); 1884 orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); 1885 if (orig_ret_vaddr == -1) 1886 goto fail; 1887 1888 /* drop the entries invalidated by longjmp() */ 1889 chained = (orig_ret_vaddr == trampoline_vaddr); 1890 cleanup_return_instances(utask, chained, regs); 1891 1892 /* 1893 * We don't want to keep trampoline address in stack, rather keep the 1894 * original return address of first caller thru all the consequent 1895 * instances. This also makes breakpoint unwrapping easier. 1896 */ 1897 if (chained) { 1898 if (!utask->return_instances) { 1899 /* 1900 * This situation is not possible. Likely we have an 1901 * attack from user-space. 
1902 */ 1903 uprobe_warn(current, "handle tail call"); 1904 goto fail; 1905 } 1906 orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; 1907 } 1908 1909 ri->uprobe = get_uprobe(uprobe); 1910 ri->func = instruction_pointer(regs); 1911 ri->stack = user_stack_pointer(regs); 1912 ri->orig_ret_vaddr = orig_ret_vaddr; 1913 ri->chained = chained; 1914 1915 utask->depth++; 1916 ri->next = utask->return_instances; 1917 utask->return_instances = ri; 1918 1919 return; 1920 fail: 1921 kfree(ri); 1922 } 1923 1924 /* Prepare to single-step probed instruction out of line. */ 1925 static int 1926 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) 1927 { 1928 struct uprobe_task *utask; 1929 unsigned long xol_vaddr; 1930 int err; 1931 1932 utask = get_utask(); 1933 if (!utask) 1934 return -ENOMEM; 1935 1936 xol_vaddr = xol_get_insn_slot(uprobe); 1937 if (!xol_vaddr) 1938 return -ENOMEM; 1939 1940 utask->xol_vaddr = xol_vaddr; 1941 utask->vaddr = bp_vaddr; 1942 1943 err = arch_uprobe_pre_xol(&uprobe->arch, regs); 1944 if (unlikely(err)) { 1945 xol_free_insn_slot(current); 1946 return err; 1947 } 1948 1949 utask->active_uprobe = uprobe; 1950 utask->state = UTASK_SSTEP; 1951 return 0; 1952 } 1953 1954 /* 1955 * If we are singlestepping, then ensure this thread is not connected to 1956 * non-fatal signals until completion of singlestep. When xol insn itself 1957 * triggers the signal, restart the original insn even if the task is 1958 * already SIGKILL'ed (since coredump should report the correct ip). This 1959 * is even more important if the task has a handler for SIGSEGV/etc, The 1960 * _same_ instruction should be repeated again after return from the signal 1961 * handler, and SSTEP can never finish in this case. 1962 */ 1963 bool uprobe_deny_signal(void) 1964 { 1965 struct task_struct *t = current; 1966 struct uprobe_task *utask = t->utask; 1967 1968 if (likely(!utask || !utask->active_uprobe)) 1969 return false; 1970 1971 WARN_ON_ONCE(utask->state != UTASK_SSTEP); 1972 1973 if (task_sigpending(t)) { 1974 spin_lock_irq(&t->sighand->siglock); 1975 clear_tsk_thread_flag(t, TIF_SIGPENDING); 1976 spin_unlock_irq(&t->sighand->siglock); 1977 1978 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { 1979 utask->state = UTASK_SSTEP_TRAPPED; 1980 set_tsk_thread_flag(t, TIF_UPROBE); 1981 } 1982 } 1983 1984 return true; 1985 } 1986 1987 static void mmf_recalc_uprobes(struct mm_struct *mm) 1988 { 1989 VMA_ITERATOR(vmi, mm, 0); 1990 struct vm_area_struct *vma; 1991 1992 for_each_vma(vmi, vma) { 1993 if (!valid_vma(vma, false)) 1994 continue; 1995 /* 1996 * This is not strictly accurate, we can race with 1997 * uprobe_unregister() and see the already removed 1998 * uprobe if delete_uprobe() was not yet called. 1999 * Or this uprobe can be filtered out. 2000 */ 2001 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) 2002 return; 2003 } 2004 2005 clear_bit(MMF_HAS_UPROBES, &mm->flags); 2006 } 2007 2008 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) 2009 { 2010 struct page *page; 2011 uprobe_opcode_t opcode; 2012 int result; 2013 2014 if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) 2015 return -EINVAL; 2016 2017 pagefault_disable(); 2018 result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); 2019 pagefault_enable(); 2020 2021 if (likely(result == 0)) 2022 goto out; 2023 2024 /* 2025 * The NULL 'tsk' here ensures that any faults that occur here 2026 * will not be accounted to the task. 
'mm' *is* current->mm, 2027 * but we treat this as a 'remote' access since it is 2028 * essentially a kernel access to the memory. 2029 */ 2030 result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, 2031 NULL, NULL); 2032 if (result < 0) 2033 return result; 2034 2035 copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); 2036 put_page(page); 2037 out: 2038 /* This needs to return true for any variant of the trap insn */ 2039 return is_trap_insn(&opcode); 2040 } 2041 2042 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) 2043 { 2044 struct mm_struct *mm = current->mm; 2045 struct uprobe *uprobe = NULL; 2046 struct vm_area_struct *vma; 2047 2048 mmap_read_lock(mm); 2049 vma = vma_lookup(mm, bp_vaddr); 2050 if (vma) { 2051 if (valid_vma(vma, false)) { 2052 struct inode *inode = file_inode(vma->vm_file); 2053 loff_t offset = vaddr_to_offset(vma, bp_vaddr); 2054 2055 uprobe = find_uprobe(inode, offset); 2056 } 2057 2058 if (!uprobe) 2059 *is_swbp = is_trap_at_addr(mm, bp_vaddr); 2060 } else { 2061 *is_swbp = -EFAULT; 2062 } 2063 2064 if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) 2065 mmf_recalc_uprobes(mm); 2066 mmap_read_unlock(mm); 2067 2068 return uprobe; 2069 } 2070 2071 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) 2072 { 2073 struct uprobe_consumer *uc; 2074 int remove = UPROBE_HANDLER_REMOVE; 2075 bool need_prep = false; /* prepare return uprobe, when needed */ 2076 2077 down_read(&uprobe->register_rwsem); 2078 for (uc = uprobe->consumers; uc; uc = uc->next) { 2079 int rc = 0; 2080 2081 if (uc->handler) { 2082 rc = uc->handler(uc, regs); 2083 WARN(rc & ~UPROBE_HANDLER_MASK, 2084 "bad rc=0x%x from %ps()\n", rc, uc->handler); 2085 } 2086 2087 if (uc->ret_handler) 2088 need_prep = true; 2089 2090 remove &= rc; 2091 } 2092 2093 if (need_prep && !remove) 2094 prepare_uretprobe(uprobe, regs); /* put bp at return */ 2095 2096 if (remove && uprobe->consumers) { 2097 WARN_ON(!uprobe_is_active(uprobe)); 2098 unapply_uprobe(uprobe, current->mm); 2099 } 2100 up_read(&uprobe->register_rwsem); 2101 } 2102 2103 static void 2104 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) 2105 { 2106 struct uprobe *uprobe = ri->uprobe; 2107 struct uprobe_consumer *uc; 2108 2109 down_read(&uprobe->register_rwsem); 2110 for (uc = uprobe->consumers; uc; uc = uc->next) { 2111 if (uc->ret_handler) 2112 uc->ret_handler(uc, ri->func, regs); 2113 } 2114 up_read(&uprobe->register_rwsem); 2115 } 2116 2117 static struct return_instance *find_next_ret_chain(struct return_instance *ri) 2118 { 2119 bool chained; 2120 2121 do { 2122 chained = ri->chained; 2123 ri = ri->next; /* can't be NULL if chained */ 2124 } while (chained); 2125 2126 return ri; 2127 } 2128 2129 static void handle_trampoline(struct pt_regs *regs) 2130 { 2131 struct uprobe_task *utask; 2132 struct return_instance *ri, *next; 2133 bool valid; 2134 2135 utask = current->utask; 2136 if (!utask) 2137 goto sigill; 2138 2139 ri = utask->return_instances; 2140 if (!ri) 2141 goto sigill; 2142 2143 do { 2144 /* 2145 * We should throw out the frames invalidated by longjmp(). 2146 * If this chain is valid, then the next one should be alive 2147 * or NULL; the latter case means that nobody but ri->func 2148 * could hit this trampoline on return. TODO: sigaltstack(). 
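		 *
		 * find_next_ret_chain() skips to the first frame that was not
		 * chained onto this trampoline hit; the inner loop below
		 * reports and frees everything up to it in one go, skipping
		 * the ret_handlers entirely when the chain was invalidated.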
static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}

static void handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

 sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig(SIGILL);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
					struct pt_regs *regs)
{
	return true;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int is_swbp;

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == get_trampoline_vaddr())
		return handle_trampoline(regs);

	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			force_sig(SIGTRAP);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/*
	 * Pairs with the smp_wmb() in prepare_uprobe().
	 *
	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
	 * we must also see the stores to &uprobe->arch performed by the
	 * prepare_uprobe() call.
	 */
	smp_rmb();

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
out:
	put_uprobe(uprobe);
}
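/*
 * Example (illustrative sketch): an architecture overrides the __weak
 * arch_uretprobe_is_alive() stub above so that handle_trampoline() can
 * drop return_instances whose frames were discarded by longjmp(). On a
 * stack-grows-down architecture such as x86 this amounts to comparing the
 * current user stack pointer with the one recorded in prepare_uretprobe();
 * see arch/x86/kernel/uprobes.c for the real implementation.
 */
#if 0
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL)	/* sp was just decremented by the "call" insn */
		return regs->sp < ret->stack;
	else
		return regs->sp <= ret->stack;
}
#endif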
/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig(SIGILL);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

void __init uprobes_init(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	BUG_ON(register_die_notifier(&uprobe_exception_nb));
}
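/*
 * Example (illustrative sketch): uprobe_exception_nb above is registered on
 * the die-notifier chain, and the architecture supplies
 * arch_uprobe_exception_notify() to route its breakpoint and single-step
 * exceptions into uprobe_pre_sstep_notifier()/uprobe_post_sstep_notifier().
 * Roughly what the x86 side does (simplified; the real code lives in
 * arch/x86/kernel/uprobes.c):
 */
#if 0
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:		/* breakpoint instruction hit */
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:		/* single-step of the out-of-line copy completed */
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	return ret;
}
#endif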