/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13

/*
 * We need separate register/unregister and mmap/munmap lock hashes because
 * of mmap_sem nesting.
 *
 * uprobe_register() needs to install probes on (potentially) all processes
 * and thus needs to acquire multiple mmap_sems (consecutively, not
 * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
 * for the particular process doing the mmap.
 *
 * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
 * because of lock order against i_mmap_mutex.  This means there's a hole in
 * the register vma iteration where a mmap() can happen.
 *
 * Thus uprobe_register() can race with uprobe_mmap() and we can try and
 * install a probe where one is already installed.
 */

/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];

#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
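
/*
 * Both hashes key on the inode pointer, so for example uprobe_register()
 * and uprobe_unregister() for the same inode always hash to the same
 * mutex in uprobes_mutex[] and therefore serialize against each other.
 */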

/*
 * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
 * events active at this time.  Probably a fine-grained per-inode count is
 * better?
 */
static atomic_t uprobe_events = ATOMIC_INIT(0);

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	int			flags;
	struct arch_uprobe	arch;
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	if (!vma->vm_file)
		return false;

	if (!is_register)
		return true;

	if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
				== (VM_READ|VM_EXEC))
		return true;

	return false;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
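
/*
 * For example (values are illustrative only): with vm_start == 0x400000
 * and vm_pgoff == 0, a probe at file offset 0x1234 maps to the virtual
 * address 0x401234 via offset_to_vaddr(); vaddr_to_offset() computes the
 * inverse.
 */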

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @page:     the cowed page we are replacing by kpage
 * @kpage:    the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	int err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(page);

	err = -EAGAIN;
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto unlock;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	pte_unmap_unlock(ptep, ptl);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(page);
	put_page(page);

	err = 0;
unlock:
	unlock_page(page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture.  If an arch has variable-length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify read_opcode/write_opcode accordingly.
 * This would never be a problem for archs that have fixed-length
 * instructions.
 */

/*
 * write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch breakpointing information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);

	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
	memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_new;

	ret = __replace_page(vma, vaddr, old_page, new_page);

put_new:
	page_cache_release(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

/**
 * read_opcode - read the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to read the opcode.
 * @opcode: location to store the read opcode.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
 * Return 0 (success) or a negative errno.
 */
static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	struct page *page;
	void *vaddr_new;
	int ret;

	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
	if (ret <= 0)
		return ret;

	vaddr_new = kmap_atomic(page);
	vaddr &= ~PAGE_MASK;
	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
	kunmap_atomic(vaddr_new);

	put_page(page);

	return 0;
}

static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	uprobe_opcode_t opcode;
	int result;

	if (current->mm == mm) {
		pagefault_disable();
		result = __copy_from_user_inatomic(&opcode, (void __user *)vaddr,
							sizeof(opcode));
		pagefault_enable();

		if (likely(result == 0))
			goto out;
	}

	result = read_opcode(mm, vaddr, &opcode);
	if (result)
		return result;
out:
	if (is_swbp_insn(&opcode))
		return 1;

	return 0;
}
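
/*
 * Note: is_swbp_at_addr() returns 1 if a breakpoint instruction is already
 * present at @vaddr, 0 if the original instruction is still there, or a
 * negative errno if the opcode could not be read; set_swbp() and
 * set_orig_insn() below distinguish all three cases.
 */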

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	int result;
	/*
	 * See the comment near uprobes_hash().
	 */
	result = is_swbp_at_addr(mm, vaddr);
	if (result == 1)
		return 0;

	if (result)
		return result;

	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	int result;

	result = is_swbp_at_addr(mm, vaddr);
	if (!result)
		return -EINVAL;

	if (result != 1)
		return result;

	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * If a matching uprobe already exists in the rbtree, increment its
 * (access) refcount and return the matching uprobe.
 *
 * If there is no matching uprobe, insert @uprobe into the rbtree, take
 * a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	/* For now assume that the instruction need not be single-stepped */
	uprobe->flags |= UPROBE_SKIP_SSTEP;

	return u;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	} else {
		atomic_inc(&uprobe_events);
	}

	return uprobe;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;

	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
		return;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (!uc->filter || uc->filter(uc, current))
			uc->handler(uc, regs);
	}
	up_read(&uprobe->consumer_rwsem);
}

/* Returns the previous consumer */
static struct uprobe_consumer *
consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);

	return uc->next;
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was found and deleted, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}
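
/*
 * Note that consumers form a simple singly linked, LIFO list:
 * consumer_add() returns the previous list head, which is NULL when @uc
 * is the first consumer; uprobe_register() uses that to decide whether
 * breakpoints still need to be installed.
 */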

static int
__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
			unsigned long nbytes, loff_t offset)
{
	struct page *page;
	void *vaddr;
	unsigned long off;
	pgoff_t idx;

	if (!filp)
		return -EINVAL;

	if (!mapping->a_ops->readpage)
		return -EIO;

	idx = offset >> PAGE_CACHE_SHIFT;
	off = offset & ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes);
		if (err)
			return err;
		bytes = nbytes;
	}
	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
}
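
/*
 * For example (illustrative values; MAX_UINSN_BYTES is arch dependent):
 * with MAX_UINSN_BYTES == 16 and a probe 4 bytes before a page boundary,
 * copy_insn() first copies the trailing 12 bytes from the start of the
 * next page and then the leading 4 bytes from the probe's own page.
 */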

/*
 * How mm->uprobes_state.count gets updated
 * uprobe_mmap() increments the count if
 * 	- it successfully adds a breakpoint.
 * 	- it cannot add a breakpoint, but sees that there is an underlying
 * 	  breakpoint (via is_swbp_at_addr()).
 *
 * uprobe_munmap() decrements the count if
 * 	- it sees an underlying breakpoint (via is_swbp_at_addr).
 * 	  (Subsequent uprobe_unregister wouldn't find the breakpoint
 * 	  unless a uprobe_mmap kicks in, since the old vma would be
 * 	  dropped just after uprobe_munmap.)
 *
 * uprobe_register increments the count if:
 * 	- it successfully adds a breakpoint.
 *
 * uprobe_unregister decrements the count if:
 * 	- it sees an underlying breakpoint and removes it successfully
 * 	  (via is_swbp_at_addr).
 * 	  (Subsequent uprobe_munmap wouldn't find the breakpoint
 * 	  since there is no underlying breakpoint after the
 * 	  breakpoint removal.)
 */
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	/*
	 * If probe is being deleted, unregister thread could be done with
	 * the vma-rmap-walk through.  Adding a probe now can be fatal since
	 * nobody will be able to cleanup.  Also we could be from fork or
	 * mremap path, where the probe might have already been inserted.
	 * Hence behave as if probe already existed.
	 */
	if (!uprobe->consumers)
		return 0;

	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
		ret = copy_insn(uprobe, vma->vm_file);
		if (ret)
			return ret;

		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
			return -ENOTSUPP;

		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
		if (ret)
			return ret;

		/* write_opcode() assumes we don't cross page boundary */
		BUG_ON((uprobe->offset & ~PAGE_MASK) +
				UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

		uprobe->flags |= UPROBE_COPY_INSN;
	}

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static void
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	/* can happen if uprobe_register() fails */
	if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
		return;

	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	set_orig_insn(&uprobe->arch, mm, vaddr);
}

/*
 * There could be threads that have already hit the breakpoint.  They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	iput(uprobe->inode);
	put_uprobe(uprobe);
	atomic_dec(&uprobe_events);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct prio_tree_iter iter;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
			 * reclaim.  This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}
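
/*
 * build_map_info() cannot block for memory while holding i_mmap_mutex, so
 * it allocates map_info entries with GFP_NOWAIT inside the loop.  If any
 * allocation fails it counts how many were missed in 'more', drops the
 * lock, pre-allocates that many entries with GFP_KERNEL and retries the
 * whole vma walk from 'again'.
 */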

static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct map_info *info;
	int err = 0;

	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info))
		return PTR_ERR(info);

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    vma->vm_file->f_mapping->host != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register)
			err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		else
			remove_breakpoint(uprobe, mm, info->vaddr);

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}

	return err;
}

static int __uprobe_register(struct uprobe *uprobe)
{
	return register_for_each_vma(uprobe, true);
}

static void __uprobe_unregister(struct uprobe *uprobe)
{
	if (!register_for_each_vma(uprobe, false))
		delete_uprobe(uprobe);

	/* TODO : can't unregister? schedule a worker thread */
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e., the first consumer for a @inode:@offset
 * tuple).  The creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete.  The creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes,
 * else return 0 (success).
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	if (!inode || !uc || uc->next)
		return -EINVAL;

	if (offset > i_size_read(inode))
		return -EINVAL;

	ret = 0;
	mutex_lock(uprobes_hash(inode));
	uprobe = alloc_uprobe(inode, offset);

	if (uprobe && !consumer_add(uprobe, uc)) {
		ret = __uprobe_register(uprobe);
		if (ret) {
			uprobe->consumers = NULL;
			__uprobe_unregister(uprobe);
		} else {
			uprobe->flags |= UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);

	return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	if (!inode || !uc)
		return;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	mutex_lock(uprobes_hash(inode));

	if (consumer_del(uprobe, uc)) {
		if (!uprobe->consumers) {
			__uprobe_unregister(uprobe);
			uprobe->flags &= ~UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);
}
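
/*
 * Example usage (an illustrative sketch only; the handler below and the
 * way the inode and offset are obtained are assumptions, not part of this
 * file):
 *
 *	static int sample_handler(struct uprobe_consumer *self,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer sample_consumer = {
 *		.handler = sample_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &sample_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &sample_consumer);
 */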

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);

	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = vma->vm_file->f_mapping->host;

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct xol_area *area)
{
	struct mm_struct *mm;
	int ret;

	area->page = alloc_page(GFP_HIGHUSER);
	if (!area->page)
		return -ENOMEM;

	ret = -EALREADY;
	mm = current->mm;

	down_write(&mm->mmap_sem);
	if (mm->uprobes_state.xol_area)
		goto fail;

	ret = -ENOMEM;

	/* Try to map as high as possible, this is only a hint. */
	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
	if (area->vaddr & ~PAGE_MASK) {
		ret = area->vaddr;
		goto fail;
	}

	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
	if (ret)
		goto fail;

	smp_wmb();	/* pairs with get_xol_area() */
	mm->uprobes_state.xol_area = area;
	ret = 0;

 fail:
	up_write(&mm->mmap_sem);
	if (ret)
		__free_page(area->page);

	return ret;
}

static struct xol_area *get_xol_area(struct mm_struct *mm)
{
	struct xol_area *area;

	area = mm->uprobes_state.xol_area;
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */

	return area;
}
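
/*
 * The XOL ("execute out of line") area is a single page mapped high in the
 * task's address space.  It provides UINSNS_PER_PAGE slots of
 * UPROBE_XOL_SLOT_BYTES each, tracked by area->bitmap; for example, with
 * 4 KiB pages and 128-byte slots (both values are configuration dependent)
 * that is 32 slots per process.
 */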

/*
 * xol_alloc_area - Allocate process's xol_area.
 * This area will be used for storing instructions for execution out of
 * line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *xol_alloc_area(void)
{
	struct xol_area *area;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);

	if (!area->bitmap)
		goto fail;

	init_waitqueue_head(&area->wq);
	if (!xol_add_vma(area))
		return area;

 fail:
	kfree(area->bitmap);
	kfree(area);

	return get_xol_area(current->mm);
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	newmm->uprobes_state.xol_area = NULL;

	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 * - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - allocate an instruction slot and copy the probed
 * instruction into it, creating the XOL area first if needed.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
{
	struct xol_area *area;
	unsigned long offset;
	void *vaddr;

	area = get_xol_area(current->mm);
	if (!area) {
		area = xol_alloc_area();
		if (!area)
			return 0;
	}
	current->utask->xol_vaddr = xol_take_insn_slot(area);

	/*
	 * Initialize the slot if xol_vaddr points to valid
	 * instruction slot.
	 */
	if (unlikely(!current->utask->xol_vaddr))
		return 0;

	current->utask->vaddr = slot_addr;
	offset = current->utask->xol_vaddr & ~PAGE_MASK;
	vaddr = kmap_atomic(area->page);
	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
	kunmap_atomic(vaddr);

	return current->utask->xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;

	if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t)
{
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task.
 * Called when the thread hits a breakpoint for the first time.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *add_utask(void)
{
	struct uprobe_task *utask;

	utask = kzalloc(sizeof *utask, GFP_KERNEL);
	if (unlikely(!utask))
		return NULL;

	current->utask = utask;
	return utask;
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
{
	if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
		return 0;

	return -EFAULT;
}
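
/*
 * The overall single-step flow is: handle_swbp() runs the consumers and
 * calls pre_ssout(), which copies the original instruction into an XOL
 * slot and lets arch_uprobe_pre_xol() point the task at it; after the
 * single-step trap, handle_singlestep() calls arch_uprobe_post_xol() (or
 * arch_uprobe_abort_xol()) and releases the slot.
 */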

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip).  This
 * is even more important if the task has a handler for SIGSEGV/etc.  The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		}
	}

	return true;
}

/*
 * Avoid singlestepping the original instruction if the original instruction
 * is a NOP or can be emulated.
 */
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
{
	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		return true;

	uprobe->flags &= ~UPROBE_SKIP_SSTEP;
	return false;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = vma->vm_file->f_mapping->host;
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);

	return uprobe;
}

void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
{
	user_enable_single_step(current);
}

void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
{
	user_disable_single_step(current);
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);

	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory.  The latter is only possible if
			 * another thread plays with our ->mm.  In both cases
			 * we can simply restart.  If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	utask = current->utask;
	if (!utask) {
		utask = add_utask();
		/* Cannot allocate; re-execute the instruction. */
		if (!utask)
			goto cleanup_ret;
	}
	utask->active_uprobe = uprobe;
	handler_chain(uprobe, regs);
	if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
		goto cleanup_ret;

	utask->state = UTASK_SSTEP;
	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
		arch_uprobe_enable_step(&uprobe->arch);
		return;
	}

cleanup_ret:
	if (utask) {
		utask->active_uprobe = NULL;
		utask->state = UTASK_RUNNING;
	}
	if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
		/*
		 * cannot singlestep; cannot skip instruction;
		 * re-execute the instruction.
		 */
		instruction_pointer_set(regs, bp_vaddr);

	put_uprobe(uprobe);
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	arch_uprobe_disable_step(&uprobe->arch);
	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending();	/* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
}

/*
 * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag
 * (and on subsequent probe hits on the thread sets the state to
 * UTASK_BP_HIT) and allows the thread to return from the interrupt.
 *
 * On a singlestep exception, the singlestep notifier sets the TIF_UPROBE
 * flag, also sets the state to UTASK_SSTEP_ACK and allows the thread to
 * return from the interrupt.
 *
 * While returning to userspace, the thread notices the TIF_UPROBE flag
 * and calls uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	utask = current->utask;
	if (!utask || utask->state == UTASK_BP_HIT)
		handle_swbp(regs);
	else
		handle_singlestep(utask, regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism.  Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
		return 0;

	utask = current->utask;
	if (utask)
		utask->state = UTASK_BP_HIT;

	set_thread_flag(TIF_UPROBE);

	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
 * mechanism.  Set TIF_UPROBE flag and indicate completion of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++) {
		mutex_init(&uprobes_mutex[i]);
		mutex_init(&uprobes_mmap_mutex[i]);
	}

	return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);

static void __exit exit_uprobes(void)
{
}
module_exit(exit_uprobes);