// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time. A fine-grained per-inode count would probably be better.
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

DEFINE_STATIC_SRCU(uprobes_srcu);

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct list_head	consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	struct rcu_head		rcu;
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot. It frees the
 * slot after singlestep. Currently a fixed number of slots is
 * allocated.
 */
struct xol_area {
	wait_queue_head_t	wq;		/* if all slots are busy */
	atomic_t		slot_count;	/* number of in-use slots */
	unsigned long		*bitmap;	/* 0 = free slot */

	struct page		*page;
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long		vaddr;		/* Page(s) of instruction slots */
};

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
}

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified vma is an executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing
 * @new_page: the modified page replacing @old_page
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_folio));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at(mm, addr, pvmw.pte,
			   mk_pte(new_page, vma->vm_page_prot));

	folio_remove_rmap_pte(old_folio, old_page, vma);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

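/*
 * Worked example for offset_to_vaddr()/vaddr_to_offset() above
 * (illustrative values only, assuming PAGE_SHIFT == 12): for a vma with
 * vm_start == 0x7f0000002000 and vm_pgoff == 2, a probe at file offset
 * 0x3010 resolves to
 *
 *	vaddr = 0x7f0000002000 + 0x3010 - (2 << 12) = 0x7f0000003010
 *
 * and vaddr_to_offset() is the exact inverse for any address that falls
 * inside the vma.
 */
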
221 /** 222 * is_swbp_insn - check if instruction is breakpoint instruction. 223 * @insn: instruction to be checked. 224 * Default implementation of is_swbp_insn 225 * Returns true if @insn is a breakpoint instruction. 226 */ 227 bool __weak is_swbp_insn(uprobe_opcode_t *insn) 228 { 229 return *insn == UPROBE_SWBP_INSN; 230 } 231 232 /** 233 * is_trap_insn - check if instruction is breakpoint instruction. 234 * @insn: instruction to be checked. 235 * Default implementation of is_trap_insn 236 * Returns true if @insn is a breakpoint instruction. 237 * 238 * This function is needed for the case where an architecture has multiple 239 * trap instructions (like powerpc). 240 */ 241 bool __weak is_trap_insn(uprobe_opcode_t *insn) 242 { 243 return is_swbp_insn(insn); 244 } 245 246 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len) 247 { 248 void *kaddr = kmap_atomic(page); 249 memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len); 250 kunmap_atomic(kaddr); 251 } 252 253 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) 254 { 255 void *kaddr = kmap_atomic(page); 256 memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); 257 kunmap_atomic(kaddr); 258 } 259 260 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) 261 { 262 uprobe_opcode_t old_opcode; 263 bool is_swbp; 264 265 /* 266 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here. 267 * We do not check if it is any other 'trap variant' which could 268 * be conditional trap instruction such as the one powerpc supports. 269 * 270 * The logic is that we do not care if the underlying instruction 271 * is a trap variant; uprobes always wins over any other (gdb) 272 * breakpoint. 273 */ 274 copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE); 275 is_swbp = is_swbp_insn(&old_opcode); 276 277 if (is_swbp_insn(new_opcode)) { 278 if (is_swbp) /* register: already installed? */ 279 return 0; 280 } else { 281 if (!is_swbp) /* unregister: was it changed by us? 
*/ 282 return 0; 283 } 284 285 return 1; 286 } 287 288 static struct delayed_uprobe * 289 delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm) 290 { 291 struct delayed_uprobe *du; 292 293 list_for_each_entry(du, &delayed_uprobe_list, list) 294 if (du->uprobe == uprobe && du->mm == mm) 295 return du; 296 return NULL; 297 } 298 299 static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm) 300 { 301 struct delayed_uprobe *du; 302 303 if (delayed_uprobe_check(uprobe, mm)) 304 return 0; 305 306 du = kzalloc(sizeof(*du), GFP_KERNEL); 307 if (!du) 308 return -ENOMEM; 309 310 du->uprobe = uprobe; 311 du->mm = mm; 312 list_add(&du->list, &delayed_uprobe_list); 313 return 0; 314 } 315 316 static void delayed_uprobe_delete(struct delayed_uprobe *du) 317 { 318 if (WARN_ON(!du)) 319 return; 320 list_del(&du->list); 321 kfree(du); 322 } 323 324 static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm) 325 { 326 struct list_head *pos, *q; 327 struct delayed_uprobe *du; 328 329 if (!uprobe && !mm) 330 return; 331 332 list_for_each_safe(pos, q, &delayed_uprobe_list) { 333 du = list_entry(pos, struct delayed_uprobe, list); 334 335 if (uprobe && du->uprobe != uprobe) 336 continue; 337 if (mm && du->mm != mm) 338 continue; 339 340 delayed_uprobe_delete(du); 341 } 342 } 343 344 static bool valid_ref_ctr_vma(struct uprobe *uprobe, 345 struct vm_area_struct *vma) 346 { 347 unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset); 348 349 return uprobe->ref_ctr_offset && 350 vma->vm_file && 351 file_inode(vma->vm_file) == uprobe->inode && 352 (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && 353 vma->vm_start <= vaddr && 354 vma->vm_end > vaddr; 355 } 356 357 static struct vm_area_struct * 358 find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm) 359 { 360 VMA_ITERATOR(vmi, mm, 0); 361 struct vm_area_struct *tmp; 362 363 for_each_vma(vmi, tmp) 364 if (valid_ref_ctr_vma(uprobe, tmp)) 365 return tmp; 366 367 return NULL; 368 } 369 370 static int 371 __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) 372 { 373 void *kaddr; 374 struct page *page; 375 int ret; 376 short *ptr; 377 378 if (!vaddr || !d) 379 return -EINVAL; 380 381 ret = get_user_pages_remote(mm, vaddr, 1, 382 FOLL_WRITE, &page, NULL); 383 if (unlikely(ret <= 0)) { 384 /* 385 * We are asking for 1 page. If get_user_pages_remote() fails, 386 * it may return 0, in that case we have to return error. 387 */ 388 return ret == 0 ? -EBUSY : ret; 389 } 390 391 kaddr = kmap_atomic(page); 392 ptr = kaddr + (vaddr & ~PAGE_MASK); 393 394 if (unlikely(*ptr + d < 0)) { 395 pr_warn("ref_ctr going negative. vaddr: 0x%lx, " 396 "curr val: %d, delta: %d\n", vaddr, *ptr, d); 397 ret = -EINVAL; 398 goto out; 399 } 400 401 *ptr += d; 402 ret = 0; 403 out: 404 kunmap_atomic(kaddr); 405 put_page(page); 406 return ret; 407 } 408 409 static void update_ref_ctr_warn(struct uprobe *uprobe, 410 struct mm_struct *mm, short d) 411 { 412 pr_warn("ref_ctr %s failed for inode: 0x%lx offset: " 413 "0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n", 414 d > 0 ? 
"increment" : "decrement", uprobe->inode->i_ino, 415 (unsigned long long) uprobe->offset, 416 (unsigned long long) uprobe->ref_ctr_offset, mm); 417 } 418 419 static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm, 420 short d) 421 { 422 struct vm_area_struct *rc_vma; 423 unsigned long rc_vaddr; 424 int ret = 0; 425 426 rc_vma = find_ref_ctr_vma(uprobe, mm); 427 428 if (rc_vma) { 429 rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset); 430 ret = __update_ref_ctr(mm, rc_vaddr, d); 431 if (ret) 432 update_ref_ctr_warn(uprobe, mm, d); 433 434 if (d > 0) 435 return ret; 436 } 437 438 mutex_lock(&delayed_uprobe_lock); 439 if (d > 0) 440 ret = delayed_uprobe_add(uprobe, mm); 441 else 442 delayed_uprobe_remove(uprobe, mm); 443 mutex_unlock(&delayed_uprobe_lock); 444 445 return ret; 446 } 447 448 /* 449 * NOTE: 450 * Expect the breakpoint instruction to be the smallest size instruction for 451 * the architecture. If an arch has variable length instruction and the 452 * breakpoint instruction is not of the smallest length instruction 453 * supported by that architecture then we need to modify is_trap_at_addr and 454 * uprobe_write_opcode accordingly. This would never be a problem for archs 455 * that have fixed length instructions. 456 * 457 * uprobe_write_opcode - write the opcode at a given virtual address. 458 * @auprobe: arch specific probepoint information. 459 * @mm: the probed process address space. 460 * @vaddr: the virtual address to store the opcode. 461 * @opcode: opcode to be written at @vaddr. 462 * 463 * Called with mm->mmap_lock held for read or write. 464 * Return 0 (success) or a negative errno. 465 */ 466 int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, 467 unsigned long vaddr, uprobe_opcode_t opcode) 468 { 469 struct uprobe *uprobe; 470 struct page *old_page, *new_page; 471 struct vm_area_struct *vma; 472 int ret, is_register, ref_ctr_updated = 0; 473 bool orig_page_huge = false; 474 unsigned int gup_flags = FOLL_FORCE; 475 476 is_register = is_swbp_insn(&opcode); 477 uprobe = container_of(auprobe, struct uprobe, arch); 478 479 retry: 480 if (is_register) 481 gup_flags |= FOLL_SPLIT_PMD; 482 /* Read the page with vaddr into memory */ 483 old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma); 484 if (IS_ERR(old_page)) 485 return PTR_ERR(old_page); 486 487 ret = verify_opcode(old_page, vaddr, &opcode); 488 if (ret <= 0) 489 goto put_old; 490 491 if (WARN(!is_register && PageCompound(old_page), 492 "uprobe unregister should never work on compound page\n")) { 493 ret = -EINVAL; 494 goto put_old; 495 } 496 497 /* We are going to replace instruction, update ref_ctr. */ 498 if (!ref_ctr_updated && uprobe->ref_ctr_offset) { 499 ret = update_ref_ctr(uprobe, mm, is_register ? 
1 : -1); 500 if (ret) 501 goto put_old; 502 503 ref_ctr_updated = 1; 504 } 505 506 ret = 0; 507 if (!is_register && !PageAnon(old_page)) 508 goto put_old; 509 510 ret = anon_vma_prepare(vma); 511 if (ret) 512 goto put_old; 513 514 ret = -ENOMEM; 515 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); 516 if (!new_page) 517 goto put_old; 518 519 __SetPageUptodate(new_page); 520 copy_highpage(new_page, old_page); 521 copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); 522 523 if (!is_register) { 524 struct page *orig_page; 525 pgoff_t index; 526 527 VM_BUG_ON_PAGE(!PageAnon(old_page), old_page); 528 529 index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT; 530 orig_page = find_get_page(vma->vm_file->f_inode->i_mapping, 531 index); 532 533 if (orig_page) { 534 if (PageUptodate(orig_page) && 535 pages_identical(new_page, orig_page)) { 536 /* let go new_page */ 537 put_page(new_page); 538 new_page = NULL; 539 540 if (PageCompound(orig_page)) 541 orig_page_huge = true; 542 } 543 put_page(orig_page); 544 } 545 } 546 547 ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page); 548 if (new_page) 549 put_page(new_page); 550 put_old: 551 put_page(old_page); 552 553 if (unlikely(ret == -EAGAIN)) 554 goto retry; 555 556 /* Revert back reference counter if instruction update failed. */ 557 if (ret && is_register && ref_ctr_updated) 558 update_ref_ctr(uprobe, mm, -1); 559 560 /* try collapse pmd for compound page */ 561 if (!ret && orig_page_huge) 562 collapse_pte_mapped_thp(mm, vaddr, false); 563 564 return ret; 565 } 566 567 /** 568 * set_swbp - store breakpoint at a given address. 569 * @auprobe: arch specific probepoint information. 570 * @mm: the probed process address space. 571 * @vaddr: the virtual address to insert the opcode. 572 * 573 * For mm @mm, store the breakpoint instruction at @vaddr. 574 * Return 0 (success) or a negative errno. 575 */ 576 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) 577 { 578 return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN); 579 } 580 581 /** 582 * set_orig_insn - Restore the original instruction. 583 * @mm: the probed process address space. 584 * @auprobe: arch specific probepoint information. 585 * @vaddr: the virtual address to insert the opcode. 586 * 587 * For mm @mm, restore the original opcode (opcode) at @vaddr. 588 * Return 0 (success) or a negative errno. 589 */ 590 int __weak 591 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) 592 { 593 return uprobe_write_opcode(auprobe, mm, vaddr, 594 *(uprobe_opcode_t *)&auprobe->insn); 595 } 596 597 /* uprobe should have guaranteed positive refcount */ 598 static struct uprobe *get_uprobe(struct uprobe *uprobe) 599 { 600 refcount_inc(&uprobe->ref); 601 return uprobe; 602 } 603 604 /* 605 * uprobe should have guaranteed lifetime, which can be either of: 606 * - caller already has refcount taken (and wants an extra one); 607 * - uprobe is RCU protected and won't be freed until after grace period; 608 * - we are holding uprobes_treelock (for read or write, doesn't matter). 
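 *
 * A minimal lookup-and-pin sketch (illustrative only, not quoting any
 * caller in this file): under srcu_read_lock(&uprobes_srcu) one can do
 *
 *	uprobe = find_uprobe_rcu(inode, offset);
 *	if (uprobe && !try_get_uprobe(uprobe))
 *		uprobe = NULL;	- lost the race with the last put_uprobe()
 *
 * and keep using the uprobe after srcu_read_unlock() only if
 * try_get_uprobe() succeeded.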
609 */ 610 static struct uprobe *try_get_uprobe(struct uprobe *uprobe) 611 { 612 if (refcount_inc_not_zero(&uprobe->ref)) 613 return uprobe; 614 return NULL; 615 } 616 617 static inline bool uprobe_is_active(struct uprobe *uprobe) 618 { 619 return !RB_EMPTY_NODE(&uprobe->rb_node); 620 } 621 622 static void uprobe_free_rcu(struct rcu_head *rcu) 623 { 624 struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); 625 626 kfree(uprobe); 627 } 628 629 static void put_uprobe(struct uprobe *uprobe) 630 { 631 if (!refcount_dec_and_test(&uprobe->ref)) 632 return; 633 634 write_lock(&uprobes_treelock); 635 636 if (uprobe_is_active(uprobe)) { 637 write_seqcount_begin(&uprobes_seqcount); 638 rb_erase(&uprobe->rb_node, &uprobes_tree); 639 write_seqcount_end(&uprobes_seqcount); 640 } 641 642 write_unlock(&uprobes_treelock); 643 644 /* 645 * If application munmap(exec_vma) before uprobe_unregister() 646 * gets called, we don't get a chance to remove uprobe from 647 * delayed_uprobe_list from remove_breakpoint(). Do it here. 648 */ 649 mutex_lock(&delayed_uprobe_lock); 650 delayed_uprobe_remove(uprobe, NULL); 651 mutex_unlock(&delayed_uprobe_lock); 652 653 call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu); 654 } 655 656 static __always_inline 657 int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset, 658 const struct uprobe *r) 659 { 660 if (l_inode < r->inode) 661 return -1; 662 663 if (l_inode > r->inode) 664 return 1; 665 666 if (l_offset < r->offset) 667 return -1; 668 669 if (l_offset > r->offset) 670 return 1; 671 672 return 0; 673 } 674 675 #define __node_2_uprobe(node) \ 676 rb_entry((node), struct uprobe, rb_node) 677 678 struct __uprobe_key { 679 struct inode *inode; 680 loff_t offset; 681 }; 682 683 static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b) 684 { 685 const struct __uprobe_key *a = key; 686 return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b)); 687 } 688 689 static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b) 690 { 691 struct uprobe *u = __node_2_uprobe(a); 692 return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b)); 693 } 694 695 /* 696 * Assumes being inside RCU protected region. 697 * No refcount is taken on returned uprobe. 698 */ 699 static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset) 700 { 701 struct __uprobe_key key = { 702 .inode = inode, 703 .offset = offset, 704 }; 705 struct rb_node *node; 706 unsigned int seq; 707 708 lockdep_assert(srcu_read_lock_held(&uprobes_srcu)); 709 710 do { 711 seq = read_seqcount_begin(&uprobes_seqcount); 712 node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key); 713 /* 714 * Lockless RB-tree lookups can result only in false negatives. 715 * If the element is found, it is correct and can be returned 716 * under RCU protection. If we find nothing, we need to 717 * validate that seqcount didn't change. If it did, we have to 718 * try again as we might have missed the element (false 719 * negative). If seqcount is unchanged, search truly failed. 720 */ 721 if (node) 722 return __node_2_uprobe(node); 723 } while (read_seqcount_retry(&uprobes_seqcount, seq)); 724 725 return NULL; 726 } 727 728 /* 729 * Attempt to insert a new uprobe into uprobes_tree. 730 * 731 * If uprobe already exists (for given inode+offset), we just increment 732 * refcount of previously existing uprobe. 733 * 734 * If not, a provided new instance of uprobe is inserted into the tree (with 735 * assumed initial refcount == 1). 
736 * 737 * In any case, we return a uprobe instance that ends up being in uprobes_tree. 738 * Caller has to clean up new uprobe instance, if it ended up not being 739 * inserted into the tree. 740 * 741 * We assume that uprobes_treelock is held for writing. 742 */ 743 static struct uprobe *__insert_uprobe(struct uprobe *uprobe) 744 { 745 struct rb_node *node; 746 again: 747 node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp); 748 if (node) { 749 struct uprobe *u = __node_2_uprobe(node); 750 751 if (!try_get_uprobe(u)) { 752 rb_erase(node, &uprobes_tree); 753 RB_CLEAR_NODE(&u->rb_node); 754 goto again; 755 } 756 757 return u; 758 } 759 760 return uprobe; 761 } 762 763 /* 764 * Acquire uprobes_treelock and insert uprobe into uprobes_tree 765 * (or reuse existing one, see __insert_uprobe() comments above). 766 */ 767 static struct uprobe *insert_uprobe(struct uprobe *uprobe) 768 { 769 struct uprobe *u; 770 771 write_lock(&uprobes_treelock); 772 write_seqcount_begin(&uprobes_seqcount); 773 u = __insert_uprobe(uprobe); 774 write_seqcount_end(&uprobes_seqcount); 775 write_unlock(&uprobes_treelock); 776 777 return u; 778 } 779 780 static void 781 ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe) 782 { 783 pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx " 784 "ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n", 785 uprobe->inode->i_ino, (unsigned long long) uprobe->offset, 786 (unsigned long long) cur_uprobe->ref_ctr_offset, 787 (unsigned long long) uprobe->ref_ctr_offset); 788 } 789 790 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, 791 loff_t ref_ctr_offset) 792 { 793 struct uprobe *uprobe, *cur_uprobe; 794 795 uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL); 796 if (!uprobe) 797 return ERR_PTR(-ENOMEM); 798 799 uprobe->inode = inode; 800 uprobe->offset = offset; 801 uprobe->ref_ctr_offset = ref_ctr_offset; 802 INIT_LIST_HEAD(&uprobe->consumers); 803 init_rwsem(&uprobe->register_rwsem); 804 init_rwsem(&uprobe->consumer_rwsem); 805 RB_CLEAR_NODE(&uprobe->rb_node); 806 refcount_set(&uprobe->ref, 1); 807 808 /* add to uprobes_tree, sorted on inode:offset */ 809 cur_uprobe = insert_uprobe(uprobe); 810 /* a uprobe exists for this inode:offset combination */ 811 if (cur_uprobe != uprobe) { 812 if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) { 813 ref_ctr_mismatch_warn(cur_uprobe, uprobe); 814 put_uprobe(cur_uprobe); 815 kfree(uprobe); 816 return ERR_PTR(-EINVAL); 817 } 818 kfree(uprobe); 819 uprobe = cur_uprobe; 820 } 821 822 return uprobe; 823 } 824 825 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) 826 { 827 down_write(&uprobe->consumer_rwsem); 828 list_add_rcu(&uc->cons_node, &uprobe->consumers); 829 up_write(&uprobe->consumer_rwsem); 830 } 831 832 /* 833 * For uprobe @uprobe, delete the consumer @uc. 834 * Should never be called with consumer that's not part of @uprobe->consumers. 835 */ 836 static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc) 837 { 838 down_write(&uprobe->consumer_rwsem); 839 list_del_rcu(&uc->cons_node); 840 up_write(&uprobe->consumer_rwsem); 841 } 842 843 static int __copy_insn(struct address_space *mapping, struct file *filp, 844 void *insn, int nbytes, loff_t offset) 845 { 846 struct page *page; 847 /* 848 * Ensure that the page that has the original instruction is populated 849 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(), 850 * see uprobe_register(). 
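	 * (uprobe_register() rejects any other mapping that lacks
	 * ->read_folio with -EIO, so the shmem branch below is the only
	 * fallback needed.)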
851 */ 852 if (mapping->a_ops->read_folio) 853 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp); 854 else 855 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 856 if (IS_ERR(page)) 857 return PTR_ERR(page); 858 859 copy_from_page(page, offset, insn, nbytes); 860 put_page(page); 861 862 return 0; 863 } 864 865 static int copy_insn(struct uprobe *uprobe, struct file *filp) 866 { 867 struct address_space *mapping = uprobe->inode->i_mapping; 868 loff_t offs = uprobe->offset; 869 void *insn = &uprobe->arch.insn; 870 int size = sizeof(uprobe->arch.insn); 871 int len, err = -EIO; 872 873 /* Copy only available bytes, -EIO if nothing was read */ 874 do { 875 if (offs >= i_size_read(uprobe->inode)) 876 break; 877 878 len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK)); 879 err = __copy_insn(mapping, filp, insn, len, offs); 880 if (err) 881 break; 882 883 insn += len; 884 offs += len; 885 size -= len; 886 } while (size); 887 888 return err; 889 } 890 891 static int prepare_uprobe(struct uprobe *uprobe, struct file *file, 892 struct mm_struct *mm, unsigned long vaddr) 893 { 894 int ret = 0; 895 896 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) 897 return ret; 898 899 /* TODO: move this into _register, until then we abuse this sem. */ 900 down_write(&uprobe->consumer_rwsem); 901 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) 902 goto out; 903 904 ret = copy_insn(uprobe, file); 905 if (ret) 906 goto out; 907 908 ret = -ENOTSUPP; 909 if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn)) 910 goto out; 911 912 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); 913 if (ret) 914 goto out; 915 916 smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */ 917 set_bit(UPROBE_COPY_INSN, &uprobe->flags); 918 919 out: 920 up_write(&uprobe->consumer_rwsem); 921 922 return ret; 923 } 924 925 static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm) 926 { 927 return !uc->filter || uc->filter(uc, mm); 928 } 929 930 static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm) 931 { 932 struct uprobe_consumer *uc; 933 bool ret = false; 934 935 down_read(&uprobe->consumer_rwsem); 936 list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node, 937 srcu_read_lock_held(&uprobes_srcu)) { 938 ret = consumer_filter(uc, mm); 939 if (ret) 940 break; 941 } 942 up_read(&uprobe->consumer_rwsem); 943 944 return ret; 945 } 946 947 static int 948 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, 949 struct vm_area_struct *vma, unsigned long vaddr) 950 { 951 bool first_uprobe; 952 int ret; 953 954 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); 955 if (ret) 956 return ret; 957 958 /* 959 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(), 960 * the task can hit this breakpoint right after __replace_page(). 
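	 *
	 * (If the bit were set only after set_swbp() succeeded, a thread
	 * hitting the freshly written breakpoint could still see
	 * MMF_HAS_UPROBES clear, skip the uprobe slow path and get a
	 * spurious SIGTRAP - hence the set-first/roll-back-on-failure
	 * dance below.)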
961 */ 962 first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags); 963 if (first_uprobe) 964 set_bit(MMF_HAS_UPROBES, &mm->flags); 965 966 ret = set_swbp(&uprobe->arch, mm, vaddr); 967 if (!ret) 968 clear_bit(MMF_RECALC_UPROBES, &mm->flags); 969 else if (first_uprobe) 970 clear_bit(MMF_HAS_UPROBES, &mm->flags); 971 972 return ret; 973 } 974 975 static int 976 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) 977 { 978 set_bit(MMF_RECALC_UPROBES, &mm->flags); 979 return set_orig_insn(&uprobe->arch, mm, vaddr); 980 } 981 982 struct map_info { 983 struct map_info *next; 984 struct mm_struct *mm; 985 unsigned long vaddr; 986 }; 987 988 static inline struct map_info *free_map_info(struct map_info *info) 989 { 990 struct map_info *next = info->next; 991 kfree(info); 992 return next; 993 } 994 995 static struct map_info * 996 build_map_info(struct address_space *mapping, loff_t offset, bool is_register) 997 { 998 unsigned long pgoff = offset >> PAGE_SHIFT; 999 struct vm_area_struct *vma; 1000 struct map_info *curr = NULL; 1001 struct map_info *prev = NULL; 1002 struct map_info *info; 1003 int more = 0; 1004 1005 again: 1006 i_mmap_lock_read(mapping); 1007 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 1008 if (!valid_vma(vma, is_register)) 1009 continue; 1010 1011 if (!prev && !more) { 1012 /* 1013 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through 1014 * reclaim. This is optimistic, no harm done if it fails. 1015 */ 1016 prev = kmalloc(sizeof(struct map_info), 1017 GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN); 1018 if (prev) 1019 prev->next = NULL; 1020 } 1021 if (!prev) { 1022 more++; 1023 continue; 1024 } 1025 1026 if (!mmget_not_zero(vma->vm_mm)) 1027 continue; 1028 1029 info = prev; 1030 prev = prev->next; 1031 info->next = curr; 1032 curr = info; 1033 1034 info->mm = vma->vm_mm; 1035 info->vaddr = offset_to_vaddr(vma, offset); 1036 } 1037 i_mmap_unlock_read(mapping); 1038 1039 if (!more) 1040 goto out; 1041 1042 prev = curr; 1043 while (curr) { 1044 mmput(curr->mm); 1045 curr = curr->next; 1046 } 1047 1048 do { 1049 info = kmalloc(sizeof(struct map_info), GFP_KERNEL); 1050 if (!info) { 1051 curr = ERR_PTR(-ENOMEM); 1052 goto out; 1053 } 1054 info->next = prev; 1055 prev = info; 1056 } while (--more); 1057 1058 goto again; 1059 out: 1060 while (prev) 1061 prev = free_map_info(prev); 1062 return curr; 1063 } 1064 1065 static int 1066 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) 1067 { 1068 bool is_register = !!new; 1069 struct map_info *info; 1070 int err = 0; 1071 1072 percpu_down_write(&dup_mmap_sem); 1073 info = build_map_info(uprobe->inode->i_mapping, 1074 uprobe->offset, is_register); 1075 if (IS_ERR(info)) { 1076 err = PTR_ERR(info); 1077 goto out; 1078 } 1079 1080 while (info) { 1081 struct mm_struct *mm = info->mm; 1082 struct vm_area_struct *vma; 1083 1084 if (err && is_register) 1085 goto free; 1086 /* 1087 * We take mmap_lock for writing to avoid the race with 1088 * find_active_uprobe_rcu() which takes mmap_lock for reading. 1089 * Thus this install_breakpoint() can not make 1090 * is_trap_at_addr() true right after find_uprobe_rcu() 1091 * returns NULL in find_active_uprobe_rcu(). 
		 */
		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

/**
 * uprobe_unregister_nosync - unregister an already registered probe.
 * @uprobe: uprobe to remove
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	down_write(&uprobe->register_rwsem);
	consumer_del(uprobe, uc);
	err = register_for_each_vma(uprobe, NULL);
	up_write(&uprobe->register_rwsem);

	/* TODO: can't unregister? schedule a worker thread */
	if (unlikely(err)) {
		uprobe_warn(current, "unregister, leaking uprobe");
		return;
	}

	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);

void uprobe_unregister_sync(void)
{
	/*
	 * Now that handler_chain() and handle_uretprobe_chain() iterate over
	 * the uprobe->consumers list under RCU protection without holding
	 * uprobe->register_rwsem, we need to wait for an RCU grace period to
	 * make sure that we can't call into just unregistered
	 * uprobe_consumer's callbacks anymore. If we don't do that, a fast
	 * and unlucky enough caller could free the consumer's memory and
	 * cause handler_chain() or handle_uretprobe_chain() to do a
	 * use-after-free.
	 */
	synchronize_srcu(&uprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);

/**
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @ref_ctr_offset: offset of SDT marker / reference counter
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. the first consumer for an @inode:@offset
 * tuple). The creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. The creation
 * refcount is released when the last @uc for the @uprobe unregisters.
 * The caller of uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return: pointer to the new uprobe on success or an ERR_PTR on failure.
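 *
 * A minimal registration sketch (illustrative only; my_handler and the
 * surrounding error handling are placeholders, and a ref_ctr_offset of 0
 * means "no SDT reference counter"):
 *
 *	static struct uprobe_consumer uc = { .handler = my_handler };
 *	struct uprobe *u = uprobe_register(inode, offset, 0, &uc);
 *
 *	if (IS_ERR(u))
 *		return PTR_ERR(u);
 *	...
 *	uprobe_unregister_nosync(u, &uc);
 *	uprobe_unregister_sync();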
1179 */ 1180 struct uprobe *uprobe_register(struct inode *inode, 1181 loff_t offset, loff_t ref_ctr_offset, 1182 struct uprobe_consumer *uc) 1183 { 1184 struct uprobe *uprobe; 1185 int ret; 1186 1187 /* Uprobe must have at least one set consumer */ 1188 if (!uc->handler && !uc->ret_handler) 1189 return ERR_PTR(-EINVAL); 1190 1191 /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */ 1192 if (!inode->i_mapping->a_ops->read_folio && 1193 !shmem_mapping(inode->i_mapping)) 1194 return ERR_PTR(-EIO); 1195 /* Racy, just to catch the obvious mistakes */ 1196 if (offset > i_size_read(inode)) 1197 return ERR_PTR(-EINVAL); 1198 1199 /* 1200 * This ensures that copy_from_page(), copy_to_page() and 1201 * __update_ref_ctr() can't cross page boundary. 1202 */ 1203 if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE)) 1204 return ERR_PTR(-EINVAL); 1205 if (!IS_ALIGNED(ref_ctr_offset, sizeof(short))) 1206 return ERR_PTR(-EINVAL); 1207 1208 uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); 1209 if (IS_ERR(uprobe)) 1210 return uprobe; 1211 1212 down_write(&uprobe->register_rwsem); 1213 consumer_add(uprobe, uc); 1214 ret = register_for_each_vma(uprobe, uc); 1215 up_write(&uprobe->register_rwsem); 1216 1217 if (ret) { 1218 uprobe_unregister_nosync(uprobe, uc); 1219 /* 1220 * Registration might have partially succeeded, so we can have 1221 * this consumer being called right at this time. We need to 1222 * sync here. It's ok, it's unlikely slow path. 1223 */ 1224 uprobe_unregister_sync(); 1225 return ERR_PTR(ret); 1226 } 1227 1228 return uprobe; 1229 } 1230 EXPORT_SYMBOL_GPL(uprobe_register); 1231 1232 /** 1233 * uprobe_apply - add or remove the breakpoints according to @uc->filter 1234 * @uprobe: uprobe which "owns" the breakpoint 1235 * @uc: consumer which wants to add more or remove some breakpoints 1236 * @add: add or remove the breakpoints 1237 * Return: 0 on success or negative error code. 1238 */ 1239 int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add) 1240 { 1241 struct uprobe_consumer *con; 1242 int ret = -ENOENT, srcu_idx; 1243 1244 down_write(&uprobe->register_rwsem); 1245 1246 srcu_idx = srcu_read_lock(&uprobes_srcu); 1247 list_for_each_entry_srcu(con, &uprobe->consumers, cons_node, 1248 srcu_read_lock_held(&uprobes_srcu)) { 1249 if (con == uc) { 1250 ret = register_for_each_vma(uprobe, add ? 
uc : NULL); 1251 break; 1252 } 1253 } 1254 srcu_read_unlock(&uprobes_srcu, srcu_idx); 1255 1256 up_write(&uprobe->register_rwsem); 1257 1258 return ret; 1259 } 1260 1261 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) 1262 { 1263 VMA_ITERATOR(vmi, mm, 0); 1264 struct vm_area_struct *vma; 1265 int err = 0; 1266 1267 mmap_read_lock(mm); 1268 for_each_vma(vmi, vma) { 1269 unsigned long vaddr; 1270 loff_t offset; 1271 1272 if (!valid_vma(vma, false) || 1273 file_inode(vma->vm_file) != uprobe->inode) 1274 continue; 1275 1276 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; 1277 if (uprobe->offset < offset || 1278 uprobe->offset >= offset + vma->vm_end - vma->vm_start) 1279 continue; 1280 1281 vaddr = offset_to_vaddr(vma, uprobe->offset); 1282 err |= remove_breakpoint(uprobe, mm, vaddr); 1283 } 1284 mmap_read_unlock(mm); 1285 1286 return err; 1287 } 1288 1289 static struct rb_node * 1290 find_node_in_range(struct inode *inode, loff_t min, loff_t max) 1291 { 1292 struct rb_node *n = uprobes_tree.rb_node; 1293 1294 while (n) { 1295 struct uprobe *u = rb_entry(n, struct uprobe, rb_node); 1296 1297 if (inode < u->inode) { 1298 n = n->rb_left; 1299 } else if (inode > u->inode) { 1300 n = n->rb_right; 1301 } else { 1302 if (max < u->offset) 1303 n = n->rb_left; 1304 else if (min > u->offset) 1305 n = n->rb_right; 1306 else 1307 break; 1308 } 1309 } 1310 1311 return n; 1312 } 1313 1314 /* 1315 * For a given range in vma, build a list of probes that need to be inserted. 1316 */ 1317 static void build_probe_list(struct inode *inode, 1318 struct vm_area_struct *vma, 1319 unsigned long start, unsigned long end, 1320 struct list_head *head) 1321 { 1322 loff_t min, max; 1323 struct rb_node *n, *t; 1324 struct uprobe *u; 1325 1326 INIT_LIST_HEAD(head); 1327 min = vaddr_to_offset(vma, start); 1328 max = min + (end - start) - 1; 1329 1330 read_lock(&uprobes_treelock); 1331 n = find_node_in_range(inode, min, max); 1332 if (n) { 1333 for (t = n; t; t = rb_prev(t)) { 1334 u = rb_entry(t, struct uprobe, rb_node); 1335 if (u->inode != inode || u->offset < min) 1336 break; 1337 /* if uprobe went away, it's safe to ignore it */ 1338 if (try_get_uprobe(u)) 1339 list_add(&u->pending_list, head); 1340 } 1341 for (t = n; (t = rb_next(t)); ) { 1342 u = rb_entry(t, struct uprobe, rb_node); 1343 if (u->inode != inode || u->offset > max) 1344 break; 1345 /* if uprobe went away, it's safe to ignore it */ 1346 if (try_get_uprobe(u)) 1347 list_add(&u->pending_list, head); 1348 } 1349 } 1350 read_unlock(&uprobes_treelock); 1351 } 1352 1353 /* @vma contains reference counter, not the probed instruction. */ 1354 static int delayed_ref_ctr_inc(struct vm_area_struct *vma) 1355 { 1356 struct list_head *pos, *q; 1357 struct delayed_uprobe *du; 1358 unsigned long vaddr; 1359 int ret = 0, err = 0; 1360 1361 mutex_lock(&delayed_uprobe_lock); 1362 list_for_each_safe(pos, q, &delayed_uprobe_list) { 1363 du = list_entry(pos, struct delayed_uprobe, list); 1364 1365 if (du->mm != vma->vm_mm || 1366 !valid_ref_ctr_vma(du->uprobe, vma)) 1367 continue; 1368 1369 vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); 1370 ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); 1371 if (ret) { 1372 update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); 1373 if (!err) 1374 err = ret; 1375 } 1376 delayed_uprobe_delete(du); 1377 } 1378 mutex_unlock(&delayed_uprobe_lock); 1379 return err; 1380 } 1381 1382 /* 1383 * Called from mmap_region/vma_merge with mm->mmap_lock acquired. 
1384 * 1385 * Currently we ignore all errors and always return 0, the callers 1386 * can't handle the failure anyway. 1387 */ 1388 int uprobe_mmap(struct vm_area_struct *vma) 1389 { 1390 struct list_head tmp_list; 1391 struct uprobe *uprobe, *u; 1392 struct inode *inode; 1393 1394 if (no_uprobe_events()) 1395 return 0; 1396 1397 if (vma->vm_file && 1398 (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && 1399 test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags)) 1400 delayed_ref_ctr_inc(vma); 1401 1402 if (!valid_vma(vma, true)) 1403 return 0; 1404 1405 inode = file_inode(vma->vm_file); 1406 if (!inode) 1407 return 0; 1408 1409 mutex_lock(uprobes_mmap_hash(inode)); 1410 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); 1411 /* 1412 * We can race with uprobe_unregister(), this uprobe can be already 1413 * removed. But in this case filter_chain() must return false, all 1414 * consumers have gone away. 1415 */ 1416 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { 1417 if (!fatal_signal_pending(current) && 1418 filter_chain(uprobe, vma->vm_mm)) { 1419 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); 1420 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); 1421 } 1422 put_uprobe(uprobe); 1423 } 1424 mutex_unlock(uprobes_mmap_hash(inode)); 1425 1426 return 0; 1427 } 1428 1429 static bool 1430 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) 1431 { 1432 loff_t min, max; 1433 struct inode *inode; 1434 struct rb_node *n; 1435 1436 inode = file_inode(vma->vm_file); 1437 1438 min = vaddr_to_offset(vma, start); 1439 max = min + (end - start) - 1; 1440 1441 read_lock(&uprobes_treelock); 1442 n = find_node_in_range(inode, min, max); 1443 read_unlock(&uprobes_treelock); 1444 1445 return !!n; 1446 } 1447 1448 /* 1449 * Called in context of a munmap of a vma. 1450 */ 1451 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) 1452 { 1453 if (no_uprobe_events() || !valid_vma(vma, false)) 1454 return; 1455 1456 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ 1457 return; 1458 1459 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || 1460 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) 1461 return; 1462 1463 if (vma_has_uprobes(vma, start, end)) 1464 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); 1465 } 1466 1467 static vm_fault_t xol_fault(const struct vm_special_mapping *sm, 1468 struct vm_area_struct *vma, struct vm_fault *vmf) 1469 { 1470 struct xol_area *area = vma->vm_mm->uprobes_state.xol_area; 1471 1472 vmf->page = area->page; 1473 get_page(vmf->page); 1474 return 0; 1475 } 1476 1477 static const struct vm_special_mapping xol_mapping = { 1478 .name = "[uprobes]", 1479 .fault = xol_fault, 1480 }; 1481 1482 /* Slot allocation for XOL */ 1483 static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) 1484 { 1485 struct vm_area_struct *vma; 1486 int ret; 1487 1488 if (mmap_write_lock_killable(mm)) 1489 return -EINTR; 1490 1491 if (mm->uprobes_state.xol_area) { 1492 ret = -EALREADY; 1493 goto fail; 1494 } 1495 1496 if (!area->vaddr) { 1497 /* Try to map as high as possible, this is only a hint. 
*/ 1498 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, 1499 PAGE_SIZE, 0, 0); 1500 if (IS_ERR_VALUE(area->vaddr)) { 1501 ret = area->vaddr; 1502 goto fail; 1503 } 1504 } 1505 1506 vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, 1507 VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, 1508 &xol_mapping); 1509 if (IS_ERR(vma)) { 1510 ret = PTR_ERR(vma); 1511 goto fail; 1512 } 1513 1514 ret = 0; 1515 /* pairs with get_xol_area() */ 1516 smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */ 1517 fail: 1518 mmap_write_unlock(mm); 1519 1520 return ret; 1521 } 1522 1523 void * __weak arch_uprobe_trampoline(unsigned long *psize) 1524 { 1525 static uprobe_opcode_t insn = UPROBE_SWBP_INSN; 1526 1527 *psize = UPROBE_SWBP_INSN_SIZE; 1528 return &insn; 1529 } 1530 1531 static struct xol_area *__create_xol_area(unsigned long vaddr) 1532 { 1533 struct mm_struct *mm = current->mm; 1534 unsigned long insns_size; 1535 struct xol_area *area; 1536 void *insns; 1537 1538 area = kzalloc(sizeof(*area), GFP_KERNEL); 1539 if (unlikely(!area)) 1540 goto out; 1541 1542 area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long), 1543 GFP_KERNEL); 1544 if (!area->bitmap) 1545 goto free_area; 1546 1547 area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO); 1548 if (!area->page) 1549 goto free_bitmap; 1550 1551 area->vaddr = vaddr; 1552 init_waitqueue_head(&area->wq); 1553 /* Reserve the 1st slot for get_trampoline_vaddr() */ 1554 set_bit(0, area->bitmap); 1555 atomic_set(&area->slot_count, 1); 1556 insns = arch_uprobe_trampoline(&insns_size); 1557 arch_uprobe_copy_ixol(area->page, 0, insns, insns_size); 1558 1559 if (!xol_add_vma(mm, area)) 1560 return area; 1561 1562 __free_page(area->page); 1563 free_bitmap: 1564 kfree(area->bitmap); 1565 free_area: 1566 kfree(area); 1567 out: 1568 return NULL; 1569 } 1570 1571 /* 1572 * get_xol_area - Allocate process's xol_area if necessary. 1573 * This area will be used for storing instructions for execution out of line. 1574 * 1575 * Returns the allocated area or NULL. 1576 */ 1577 static struct xol_area *get_xol_area(void) 1578 { 1579 struct mm_struct *mm = current->mm; 1580 struct xol_area *area; 1581 1582 if (!mm->uprobes_state.xol_area) 1583 __create_xol_area(0); 1584 1585 /* Pairs with xol_add_vma() smp_store_release() */ 1586 area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */ 1587 return area; 1588 } 1589 1590 /* 1591 * uprobe_clear_state - Free the area allocated for slots. 1592 */ 1593 void uprobe_clear_state(struct mm_struct *mm) 1594 { 1595 struct xol_area *area = mm->uprobes_state.xol_area; 1596 1597 mutex_lock(&delayed_uprobe_lock); 1598 delayed_uprobe_remove(NULL, mm); 1599 mutex_unlock(&delayed_uprobe_lock); 1600 1601 if (!area) 1602 return; 1603 1604 put_page(area->page); 1605 kfree(area->bitmap); 1606 kfree(area); 1607 } 1608 1609 void uprobe_start_dup_mmap(void) 1610 { 1611 percpu_down_read(&dup_mmap_sem); 1612 } 1613 1614 void uprobe_end_dup_mmap(void) 1615 { 1616 percpu_up_read(&dup_mmap_sem); 1617 } 1618 1619 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) 1620 { 1621 if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { 1622 set_bit(MMF_HAS_UPROBES, &newmm->flags); 1623 /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ 1624 set_bit(MMF_RECALC_UPROBES, &newmm->flags); 1625 } 1626 } 1627 1628 /* 1629 * - search for a free slot. 
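 *
 * Slot geometry (as used below and in xol_free_insn_slot()): the XOL page
 * is carved into UINSNS_PER_PAGE slots of UPROBE_XOL_SLOT_BYTES each,
 * tracked by area->bitmap, so
 *
 *	slot_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES
 *
 * Slot 0 is reserved for the uretprobe trampoline by __create_xol_area().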
1630 */ 1631 static unsigned long xol_take_insn_slot(struct xol_area *area) 1632 { 1633 unsigned long slot_addr; 1634 int slot_nr; 1635 1636 do { 1637 slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); 1638 if (slot_nr < UINSNS_PER_PAGE) { 1639 if (!test_and_set_bit(slot_nr, area->bitmap)) 1640 break; 1641 1642 slot_nr = UINSNS_PER_PAGE; 1643 continue; 1644 } 1645 wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE)); 1646 } while (slot_nr >= UINSNS_PER_PAGE); 1647 1648 slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); 1649 atomic_inc(&area->slot_count); 1650 1651 return slot_addr; 1652 } 1653 1654 /* 1655 * xol_get_insn_slot - allocate a slot for xol. 1656 * Returns the allocated slot address or 0. 1657 */ 1658 static unsigned long xol_get_insn_slot(struct uprobe *uprobe) 1659 { 1660 struct xol_area *area; 1661 unsigned long xol_vaddr; 1662 1663 area = get_xol_area(); 1664 if (!area) 1665 return 0; 1666 1667 xol_vaddr = xol_take_insn_slot(area); 1668 if (unlikely(!xol_vaddr)) 1669 return 0; 1670 1671 arch_uprobe_copy_ixol(area->page, xol_vaddr, 1672 &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); 1673 1674 return xol_vaddr; 1675 } 1676 1677 /* 1678 * xol_free_insn_slot - If slot was earlier allocated by 1679 * @xol_get_insn_slot(), make the slot available for 1680 * subsequent requests. 1681 */ 1682 static void xol_free_insn_slot(struct task_struct *tsk) 1683 { 1684 struct xol_area *area; 1685 unsigned long vma_end; 1686 unsigned long slot_addr; 1687 1688 if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask) 1689 return; 1690 1691 slot_addr = tsk->utask->xol_vaddr; 1692 if (unlikely(!slot_addr)) 1693 return; 1694 1695 area = tsk->mm->uprobes_state.xol_area; 1696 vma_end = area->vaddr + PAGE_SIZE; 1697 if (area->vaddr <= slot_addr && slot_addr < vma_end) { 1698 unsigned long offset; 1699 int slot_nr; 1700 1701 offset = slot_addr - area->vaddr; 1702 slot_nr = offset / UPROBE_XOL_SLOT_BYTES; 1703 if (slot_nr >= UINSNS_PER_PAGE) 1704 return; 1705 1706 clear_bit(slot_nr, area->bitmap); 1707 atomic_dec(&area->slot_count); 1708 smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ 1709 if (waitqueue_active(&area->wq)) 1710 wake_up(&area->wq); 1711 1712 tsk->utask->xol_vaddr = 0; 1713 } 1714 } 1715 1716 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, 1717 void *src, unsigned long len) 1718 { 1719 /* Initialize the slot */ 1720 copy_to_page(page, vaddr, src, len); 1721 1722 /* 1723 * We probably need flush_icache_user_page() but it needs vma. 1724 * This should work on most of architectures by default. If 1725 * architecture needs to do something different it can define 1726 * its own version of the function. 1727 */ 1728 flush_dcache_page(page); 1729 } 1730 1731 /** 1732 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs 1733 * @regs: Reflects the saved state of the task after it has hit a breakpoint 1734 * instruction. 1735 * Return the address of the breakpoint instruction. 
1736 */ 1737 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) 1738 { 1739 return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; 1740 } 1741 1742 unsigned long uprobe_get_trap_addr(struct pt_regs *regs) 1743 { 1744 struct uprobe_task *utask = current->utask; 1745 1746 if (unlikely(utask && utask->active_uprobe)) 1747 return utask->vaddr; 1748 1749 return instruction_pointer(regs); 1750 } 1751 1752 static struct return_instance *free_ret_instance(struct return_instance *ri) 1753 { 1754 struct return_instance *next = ri->next; 1755 put_uprobe(ri->uprobe); 1756 kfree(ri); 1757 return next; 1758 } 1759 1760 /* 1761 * Called with no locks held. 1762 * Called in context of an exiting or an exec-ing thread. 1763 */ 1764 void uprobe_free_utask(struct task_struct *t) 1765 { 1766 struct uprobe_task *utask = t->utask; 1767 struct return_instance *ri; 1768 1769 if (!utask) 1770 return; 1771 1772 if (utask->active_uprobe) 1773 put_uprobe(utask->active_uprobe); 1774 1775 ri = utask->return_instances; 1776 while (ri) 1777 ri = free_ret_instance(ri); 1778 1779 xol_free_insn_slot(t); 1780 kfree(utask); 1781 t->utask = NULL; 1782 } 1783 1784 /* 1785 * Allocate a uprobe_task object for the task if necessary. 1786 * Called when the thread hits a breakpoint. 1787 * 1788 * Returns: 1789 * - pointer to new uprobe_task on success 1790 * - NULL otherwise 1791 */ 1792 static struct uprobe_task *get_utask(void) 1793 { 1794 if (!current->utask) 1795 current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); 1796 return current->utask; 1797 } 1798 1799 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) 1800 { 1801 struct uprobe_task *n_utask; 1802 struct return_instance **p, *o, *n; 1803 1804 n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); 1805 if (!n_utask) 1806 return -ENOMEM; 1807 t->utask = n_utask; 1808 1809 p = &n_utask->return_instances; 1810 for (o = o_utask->return_instances; o; o = o->next) { 1811 n = kmalloc(sizeof(struct return_instance), GFP_KERNEL); 1812 if (!n) 1813 return -ENOMEM; 1814 1815 *n = *o; 1816 /* 1817 * uprobe's refcnt has to be positive at this point, kept by 1818 * utask->return_instances items; return_instances can't be 1819 * removed right now, as task is blocked due to duping; so 1820 * get_uprobe() is safe to use here. 1821 */ 1822 get_uprobe(n->uprobe); 1823 n->next = NULL; 1824 1825 *p = n; 1826 p = &n->next; 1827 n_utask->depth++; 1828 } 1829 1830 return 0; 1831 } 1832 1833 static void dup_xol_work(struct callback_head *work) 1834 { 1835 if (current->flags & PF_EXITING) 1836 return; 1837 1838 if (!__create_xol_area(current->utask->dup_xol_addr) && 1839 !fatal_signal_pending(current)) 1840 uprobe_warn(current, "dup xol area"); 1841 } 1842 1843 /* 1844 * Called in context of a new clone/fork from copy_process. 
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
}

/*
 * The current area->vaddr notion assumes the trampoline address is always
 * equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
unsigned long uprobe_get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}

static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
					struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri);
		utask->depth--;
	}
	utask->return_instances = ri;
}

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	/* we need to bump the refcount to store the uprobe in utask */
	if (!try_get_uprobe(uprobe))
		return;

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		goto fail;

	trampoline_vaddr = uprobe_get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep the trampoline address on the stack; rather,
	 * keep the original return address of the first caller through all
	 * the subsequent instances. This also makes breakpoint unwrapping
	 * easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
1956 */ 1957 uprobe_warn(current, "handle tail call"); 1958 goto fail; 1959 } 1960 orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; 1961 } 1962 ri->uprobe = uprobe; 1963 ri->func = instruction_pointer(regs); 1964 ri->stack = user_stack_pointer(regs); 1965 ri->orig_ret_vaddr = orig_ret_vaddr; 1966 ri->chained = chained; 1967 1968 utask->depth++; 1969 ri->next = utask->return_instances; 1970 utask->return_instances = ri; 1971 1972 return; 1973 fail: 1974 kfree(ri); 1975 put_uprobe(uprobe); 1976 } 1977 1978 /* Prepare to single-step probed instruction out of line. */ 1979 static int 1980 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) 1981 { 1982 struct uprobe_task *utask; 1983 unsigned long xol_vaddr; 1984 int err; 1985 1986 utask = get_utask(); 1987 if (!utask) 1988 return -ENOMEM; 1989 1990 if (!try_get_uprobe(uprobe)) 1991 return -EINVAL; 1992 1993 xol_vaddr = xol_get_insn_slot(uprobe); 1994 if (!xol_vaddr) { 1995 err = -ENOMEM; 1996 goto err_out; 1997 } 1998 1999 utask->xol_vaddr = xol_vaddr; 2000 utask->vaddr = bp_vaddr; 2001 2002 err = arch_uprobe_pre_xol(&uprobe->arch, regs); 2003 if (unlikely(err)) { 2004 xol_free_insn_slot(current); 2005 goto err_out; 2006 } 2007 2008 utask->active_uprobe = uprobe; 2009 utask->state = UTASK_SSTEP; 2010 return 0; 2011 err_out: 2012 put_uprobe(uprobe); 2013 return err; 2014 } 2015 2016 /* 2017 * If we are singlestepping, then ensure this thread is not connected to 2018 * non-fatal signals until completion of singlestep. When xol insn itself 2019 * triggers the signal, restart the original insn even if the task is 2020 * already SIGKILL'ed (since coredump should report the correct ip). This 2021 * is even more important if the task has a handler for SIGSEGV/etc, The 2022 * _same_ instruction should be repeated again after return from the signal 2023 * handler, and SSTEP can never finish in this case. 2024 */ 2025 bool uprobe_deny_signal(void) 2026 { 2027 struct task_struct *t = current; 2028 struct uprobe_task *utask = t->utask; 2029 2030 if (likely(!utask || !utask->active_uprobe)) 2031 return false; 2032 2033 WARN_ON_ONCE(utask->state != UTASK_SSTEP); 2034 2035 if (task_sigpending(t)) { 2036 spin_lock_irq(&t->sighand->siglock); 2037 clear_tsk_thread_flag(t, TIF_SIGPENDING); 2038 spin_unlock_irq(&t->sighand->siglock); 2039 2040 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { 2041 utask->state = UTASK_SSTEP_TRAPPED; 2042 set_tsk_thread_flag(t, TIF_UPROBE); 2043 } 2044 } 2045 2046 return true; 2047 } 2048 2049 static void mmf_recalc_uprobes(struct mm_struct *mm) 2050 { 2051 VMA_ITERATOR(vmi, mm, 0); 2052 struct vm_area_struct *vma; 2053 2054 for_each_vma(vmi, vma) { 2055 if (!valid_vma(vma, false)) 2056 continue; 2057 /* 2058 * This is not strictly accurate, we can race with 2059 * uprobe_unregister() and see the already removed 2060 * uprobe if delete_uprobe() was not yet called. 2061 * Or this uprobe can be filtered out. 

/*
 * If we are singlestepping, ensure that this thread does not respond to
 * non-fatal signals until the singlestep completes. When the xol insn
 * itself triggers the signal, restart the original insn even if the task
 * is already SIGKILL'ed (since coredump should report the correct ip).
 * This is even more important if the task has a handler for SIGSEGV/etc:
 * the _same_ instruction should be repeated again after return from the
 * signal handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (task_sigpending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate; we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
		return -EINVAL;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

/* assumes being called inside an RCU-protected region */
static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, bp_vaddr);
	if (vma) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe_rcu(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	mmap_read_unlock(mm);

	return uprobe;
}
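
/*
 * Concurrency rules on the breakpoint-handling path: the uprobe is looked
 * up and its consumer list is walked under uprobes_srcu (srcu_read_lock()
 * in handle_swbp() and handle_uretprobe_chain(), list_for_each_entry_srcu()
 * below).  Before a uprobe pointer is stashed in ->utask or in a
 * return_instance, a reference is taken with try_get_uprobe() and dropped
 * later with put_uprobe().  A removal requested by the consumers is
 * re-checked with filter_chain() under ->register_rwsem before
 * unapply_uprobe() is called.
 */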

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;
	bool need_prep = false; /* prepare return uprobe, when needed */
	bool has_consumers = false;

	current->utask->auprobe = &uprobe->arch;

	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
				 srcu_read_lock_held(&uprobes_srcu)) {
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs);
			WARN(rc & ~UPROBE_HANDLER_MASK,
				"bad rc=0x%x from %ps()\n", rc, uc->handler);
		}

		if (uc->ret_handler)
			need_prep = true;

		remove &= rc;
		has_consumers = true;
	}
	current->utask->auprobe = NULL;

	if (need_prep && !remove)
		prepare_uretprobe(uprobe, regs); /* put bp at return */

	if (remove && has_consumers) {
		down_read(&uprobe->register_rwsem);

		/* re-check that removal is still required, this time under lock */
		if (!filter_chain(uprobe, current->mm)) {
			WARN_ON(!uprobe_is_active(uprobe));
			unapply_uprobe(uprobe, current->mm);
		}

		up_read(&uprobe->register_rwsem);
	}
}

static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
	struct uprobe *uprobe = ri->uprobe;
	struct uprobe_consumer *uc;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&uprobes_srcu);
	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
				 srcu_read_lock_held(&uprobes_srcu)) {
		if (uc->ret_handler)
			uc->ret_handler(uc, ri->func, regs);
	}
	srcu_read_unlock(&uprobes_srcu, srcu_idx);
}

static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}
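
/*
 * Example of how find_next_ret_chain() groups instances: with a stack of
 * return instances (top first)
 *
 *	C (chained) -> B (chained) -> A (!chained) -> X -> ...
 *
 * find_next_ret_chain(C) walks past C and B, sees A's ->chained == false,
 * advances to A's ->next and returns X: C, B and A form one chain that is
 * popped together below, and X (possibly NULL) is the top of the next chain.
 */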

void uprobe_handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			/*
			 * Pop the current instance from the stack of pending
			 * return instances, as it's not pending anymore: we
			 * just fixed up the original instruction pointer in
			 * regs and are about to call the handlers. This
			 * allows fixup_uretprobe_trampoline_entries() to
			 * properly fix up stack traces captured from
			 * uretprobe handlers, in which pending trampoline
			 * addresses on the stack are replaced with the
			 * correct original return addresses.
			 */
			utask->return_instances = ri->next;
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

 sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig(SIGILL);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
					struct pt_regs *regs)
{
	return true;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int is_swbp, srcu_idx;

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == uprobe_get_trampoline_vaddr())
		return uprobe_handle_trampoline(regs);

	srcu_idx = srcu_read_lock(&uprobes_srcu);

	uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			force_sig(SIGTRAP);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		goto out;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/*
	 * Pairs with the smp_wmb() in prepare_uprobe().
	 *
	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
	 * we must also see the stores to &uprobe->arch performed by the
	 * prepare_uprobe() call.
	 */
	smp_rmb();

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (pre_ssout(uprobe, regs, bp_vaddr))
		goto out;

out:
	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
	srcu_read_unlock(&uprobes_srcu, srcu_idx);
}
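
/*
 * utask->state transitions driven by the functions above and below:
 *
 *	UTASK_RUNNING        -> UTASK_SSTEP           pre_ssout()
 *	UTASK_SSTEP          -> UTASK_SSTEP_ACK       uprobe_post_sstep_notifier()
 *	UTASK_SSTEP          -> UTASK_SSTEP_TRAPPED   uprobe_deny_signal()
 *	UTASK_SSTEP_ACK or
 *	UTASK_SSTEP_TRAPPED  -> UTASK_RUNNING         handle_singlestep()
 */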

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig(SIGILL);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

void __init uprobes_init(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	BUG_ON(register_die_notifier(&uprobe_exception_nb));
}
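
/*
 * Illustrative sketch of a minimal consumer driven by handler_chain() and
 * handle_uretprobe_chain() above; it is not part of this file.  The
 * ->handler() and ->ret_handler() signatures follow from the call sites
 * above; attaching the consumer to an inode:offset pair is done with
 * uprobe_register(), declared in include/linux/uprobes.h (its exact
 * signature has changed across kernel versions).
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;	// 0 keeps the breakpoint installed
 *	}
 *
 *	static int my_ret_handler(struct uprobe_consumer *uc,
 *				  unsigned long func, struct pt_regs *regs)
 *	{
 *		pr_info("return from %lx\n", func);
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler	= my_handler,
 *		.ret_handler	= my_ret_handler,
 *	};
 *
 * Returning UPROBE_HANDLER_REMOVE from ->handler() asks handler_chain() to
 * remove the breakpoint from the current mm once every consumer agrees.
 */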