xref: /linux/kernel/events/uprobes.c (revision 08ec212c0f92cbf30e3ecc7349f18151714041d6)
1 /*
2  * User-space Probes (UProbes)
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) IBM Corporation, 2008-2012
19  * Authors:
20  *	Srikar Dronamraju
21  *	Jim Keniston
22  * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
23  */
24 
25 #include <linux/kernel.h>
26 #include <linux/highmem.h>
27 #include <linux/pagemap.h>	/* read_mapping_page */
28 #include <linux/slab.h>
29 #include <linux/sched.h>
30 #include <linux/rmap.h>		/* anon_vma_prepare */
31 #include <linux/mmu_notifier.h>	/* set_pte_at_notify */
32 #include <linux/swap.h>		/* try_to_free_swap */
33 #include <linux/ptrace.h>	/* user_enable_single_step */
34 #include <linux/kdebug.h>	/* notifier mechanism */
35 #include "../../mm/internal.h"	/* munlock_vma_page */
36 
37 #include <linux/uprobes.h>
38 
39 #define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
40 #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
41 
42 static struct rb_root uprobes_tree = RB_ROOT;
43 
44 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
45 
46 #define UPROBES_HASH_SZ	13
47 
48 /*
49  * We need separate register/unregister and mmap/munmap lock hashes because
50  * of mmap_sem nesting.
51  *
52  * uprobe_register() needs to install probes on (potentially) all processes
53  * and thus needs to acquire multiple mmap_sems (consecutively, not
54  * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
55  * for the particular process doing the mmap.
56  *
57  * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
58  * because of lock order against i_mmap_mutex. This means there's a hole in
59  * the register vma iteration where a mmap() can happen.
60  *
61  * Thus uprobe_register() can race with uprobe_mmap() and we can try to
62  * install a probe where one is already installed.
63  */
64 
65 /* serialize (un)register */
66 static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
67 
68 #define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
69 
70 /* serialize uprobe->pending_list */
71 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
72 #define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
73 
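/*
 * Illustration (informal): uprobe_register() takes uprobes_hash(inode)
 * and then each mm's mmap_sem in turn, while uprobe_mmap() already runs
 * under one mm's mmap_sem and takes uprobes_mmap_hash(inode). Keeping
 * the two hashes separate avoids nesting the same hashed mutex both
 * under and over mmap_sem.
 */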
74 /*
75  * uprobe_events allows us to skip uprobe_mmap() if there are no uprobe
76  * events active at this time. A fine-grained per-inode count would
77  * probably be better?
78  */
79 static atomic_t uprobe_events = ATOMIC_INIT(0);
80 
81 struct uprobe {
82 	struct rb_node		rb_node;	/* node in the rb tree */
83 	atomic_t		ref;
84 	struct rw_semaphore	consumer_rwsem;
85 	struct list_head	pending_list;
86 	struct uprobe_consumer	*consumers;
87 	struct inode		*inode;		/* Also hold a ref to inode */
88 	loff_t			offset;
89 	int			flags;
90 	struct arch_uprobe	arch;
91 };
92 
93 /*
94  * valid_vma: Verify if the specified vma is an executable vma.
95  * Relax restrictions while unregistering: vm_flags might have
96  * changed after the breakpoint was inserted.
97  *	- is_register: indicates if we are in register context.
98  *	- Return true if the specified vma is a valid,
99  *	  executable vma.
100  */
101 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
102 {
103 	if (!vma->vm_file)
104 		return false;
105 
106 	if (!is_register)
107 		return true;
108 
109 	if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
110 				== (VM_READ|VM_EXEC))
111 		return true;
112 
113 	return false;
114 }
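/*
 * Example: a typical text segment mapped MAP_PRIVATE with r-x permissions
 * passes the check above, while writable, shared, or hugetlb file
 * mappings do not qualify for registration.
 */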
115 
116 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
117 {
118 	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
119 }
120 
121 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
122 {
123 	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
124 }
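/*
 * Worked example (illustrative numbers): with vm_start == 0x400000 and
 * vm_pgoff == 0, a file offset of 0x1234 maps to the virtual address
 * 0x401234, and vaddr_to_offset(vma, 0x401234) returns 0x1234 again.
 */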
125 
126 /**
127  * __replace_page - replace page in vma with new page.
128  * based on replace_page in mm/ksm.c
129  *
130  * @vma:      vma that holds the pte pointing to page
131  * @addr:     address the old @page is mapped at
132  * @page:     the COWed page we are replacing with @kpage
133  * @kpage:    the modified page with which we replace @page
134  *
135  * Returns 0 on success, -EAGAIN on failure.
136  */
137 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
138 				struct page *page, struct page *kpage)
139 {
140 	struct mm_struct *mm = vma->vm_mm;
141 	spinlock_t *ptl;
142 	pte_t *ptep;
143 	int err;
144 	/* For mmu_notifiers */
145 	const unsigned long mmun_start = addr;
146 	const unsigned long mmun_end   = addr + PAGE_SIZE;
147 
148 	/* For try_to_free_swap() and munlock_vma_page() below */
149 	lock_page(page);
150 
151 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
152 	err = -EAGAIN;
153 	ptep = page_check_address(page, mm, addr, &ptl, 0);
154 	if (!ptep)
155 		goto unlock;
156 
157 	get_page(kpage);
158 	page_add_new_anon_rmap(kpage, vma, addr);
159 
160 	if (!PageAnon(page)) {
161 		dec_mm_counter(mm, MM_FILEPAGES);
162 		inc_mm_counter(mm, MM_ANONPAGES);
163 	}
164 
165 	flush_cache_page(vma, addr, pte_pfn(*ptep));
166 	ptep_clear_flush(vma, addr, ptep);
167 	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
168 
169 	page_remove_rmap(page);
170 	if (!page_mapped(page))
171 		try_to_free_swap(page);
172 	pte_unmap_unlock(ptep, ptl);
173 
174 	if (vma->vm_flags & VM_LOCKED)
175 		munlock_vma_page(page);
176 	put_page(page);
177 
178 	err = 0;
179  unlock:
180 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
181 	unlock_page(page);
182 	return err;
183 }
184 
185 /**
186  * is_swbp_insn - check if instruction is breakpoint instruction.
187  * @insn: instruction to be checked.
188  * Default implementation of is_swbp_insn
189  * Returns true if @insn is a breakpoint instruction.
190  */
191 bool __weak is_swbp_insn(uprobe_opcode_t *insn)
192 {
193 	return *insn == UPROBE_SWBP_INSN;
194 }
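/*
 * On x86, for example, UPROBE_SWBP_INSN is the one-byte int3 opcode
 * (0xcc), so this default works for any architecture whose breakpoint
 * instruction compares as a single uprobe_opcode_t.
 */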
195 
196 /*
197  * NOTE:
198  * Expect the breakpoint instruction to be the smallest-size instruction for
199  * the architecture. If an arch has variable-length instructions and the
200  * breakpoint instruction is not the smallest-length instruction
201  * supported by that architecture, then we need to modify read_opcode /
202  * write_opcode accordingly. This would never be a problem for archs that
203  * have fixed-length instructions.
204  */
205 
206 /*
207  * write_opcode - write the opcode at a given virtual address.
208  * @auprobe: arch breakpointing information.
209  * @mm: the probed process address space.
210  * @vaddr: the virtual address to store the opcode.
211  * @opcode: opcode to be written at @vaddr.
212  *
213  * Called with mm->mmap_sem held (for read and with a reference to
214  * mm).
215  *
216  * For mm @mm, write the opcode at @vaddr.
217  * Return 0 (success) or a negative errno.
218  */
219 static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
220 			unsigned long vaddr, uprobe_opcode_t opcode)
221 {
222 	struct page *old_page, *new_page;
223 	void *vaddr_old, *vaddr_new;
224 	struct vm_area_struct *vma;
225 	int ret;
226 
227 retry:
228 	/* Read the page with vaddr into memory */
229 	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
230 	if (ret <= 0)
231 		return ret;
232 
233 	ret = -ENOMEM;
234 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
235 	if (!new_page)
236 		goto put_old;
237 
238 	__SetPageUptodate(new_page);
239 
240 	/* copy the page now that we've got it stable */
241 	vaddr_old = kmap_atomic(old_page);
242 	vaddr_new = kmap_atomic(new_page);
243 
244 	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
245 	memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);
246 
247 	kunmap_atomic(vaddr_new);
248 	kunmap_atomic(vaddr_old);
249 
250 	ret = anon_vma_prepare(vma);
251 	if (ret)
252 		goto put_new;
253 
254 	ret = __replace_page(vma, vaddr, old_page, new_page);
255 
256 put_new:
257 	page_cache_release(new_page);
258 put_old:
259 	put_page(old_page);
260 
261 	if (unlikely(ret == -EAGAIN))
262 		goto retry;
263 	return ret;
264 }
265 
266 /**
267  * read_opcode - read the opcode at a given virtual address.
268  * @mm: the probed process address space.
269  * @vaddr: the virtual address to read the opcode.
270  * @opcode: location to store the read opcode.
271  *
272  * Called with mm->mmap_sem held (for read and with a reference to
273  * mm).
274  *
275  * For mm @mm, read the opcode at @vaddr and store it in @opcode.
276  * Return 0 (success) or a negative errno.
277  */
278 static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
279 {
280 	struct page *page;
281 	void *vaddr_new;
282 	int ret;
283 
284 	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
285 	if (ret <= 0)
286 		return ret;
287 
288 	vaddr_new = kmap_atomic(page);
289 	vaddr &= ~PAGE_MASK;
290 	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
291 	kunmap_atomic(vaddr_new);
292 
293 	put_page(page);
294 
295 	return 0;
296 }
297 
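/*
 * Returns 1 if a breakpoint instruction is currently installed at @vaddr,
 * 0 if the original instruction is intact, or a negative errno if the
 * opcode could not be read.
 */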
298 static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
299 {
300 	uprobe_opcode_t opcode;
301 	int result;
302 
303 	if (current->mm == mm) {
304 		pagefault_disable();
305 		result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
306 								sizeof(opcode));
307 		pagefault_enable();
308 
309 		if (likely(result == 0))
310 			goto out;
311 	}
312 
313 	result = read_opcode(mm, vaddr, &opcode);
314 	if (result)
315 		return result;
316 out:
317 	if (is_swbp_insn(&opcode))
318 		return 1;
319 
320 	return 0;
321 }
322 
323 /**
324  * set_swbp - store breakpoint at a given address.
325  * @auprobe: arch specific probepoint information.
326  * @mm: the probed process address space.
327  * @vaddr: the virtual address to insert the opcode.
328  *
329  * For mm @mm, store the breakpoint instruction at @vaddr.
330  * Return 0 (success) or a negative errno.
331  */
332 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
333 {
334 	int result;
335 	/*
336 	 * See the comment near uprobes_hash().
337 	 */
338 	result = is_swbp_at_addr(mm, vaddr);
339 	if (result == 1)
340 		return 0;
341 
342 	if (result)
343 		return result;
344 
345 	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
346 }
347 
348 /**
349  * set_orig_insn - Restore the original instruction.
350  * @auprobe: arch specific probepoint information.
351  * @mm: the probed process address space.
352  * @vaddr: the virtual address where the original instruction is restored.
353  *
354  * For mm @mm, restore the original instruction at @vaddr.
355  * Return 0 (success) or a negative errno.
356  */
357 int __weak
358 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
359 {
360 	int result;
361 
362 	result = is_swbp_at_addr(mm, vaddr);
363 	if (!result)
364 		return -EINVAL;
365 
366 	if (result != 1)
367 		return result;
368 
369 	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
370 }
371 
372 static int match_uprobe(struct uprobe *l, struct uprobe *r)
373 {
374 	if (l->inode < r->inode)
375 		return -1;
376 
377 	if (l->inode > r->inode)
378 		return 1;
379 
380 	if (l->offset < r->offset)
381 		return -1;
382 
383 	if (l->offset > r->offset)
384 		return 1;
385 
386 	return 0;
387 }
388 
389 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
390 {
391 	struct uprobe u = { .inode = inode, .offset = offset };
392 	struct rb_node *n = uprobes_tree.rb_node;
393 	struct uprobe *uprobe;
394 	int match;
395 
396 	while (n) {
397 		uprobe = rb_entry(n, struct uprobe, rb_node);
398 		match = match_uprobe(&u, uprobe);
399 		if (!match) {
400 			atomic_inc(&uprobe->ref);
401 			return uprobe;
402 		}
403 
404 		if (match < 0)
405 			n = n->rb_left;
406 		else
407 			n = n->rb_right;
408 	}
409 	return NULL;
410 }
411 
412 /*
413  * Find a uprobe corresponding to a given inode:offset
414  * Acquires uprobes_treelock
415  */
416 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
417 {
418 	struct uprobe *uprobe;
419 
420 	spin_lock(&uprobes_treelock);
421 	uprobe = __find_uprobe(inode, offset);
422 	spin_unlock(&uprobes_treelock);
423 
424 	return uprobe;
425 }
426 
427 static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
428 {
429 	struct rb_node **p = &uprobes_tree.rb_node;
430 	struct rb_node *parent = NULL;
431 	struct uprobe *u;
432 	int match;
433 
434 	while (*p) {
435 		parent = *p;
436 		u = rb_entry(parent, struct uprobe, rb_node);
437 		match = match_uprobe(uprobe, u);
438 		if (!match) {
439 			atomic_inc(&u->ref);
440 			return u;
441 		}
442 
443 		if (match < 0)
444 			p = &parent->rb_left;
445 		else
446 			p = &parent->rb_right;
447 
448 	}
449 
450 	u = NULL;
451 	rb_link_node(&uprobe->rb_node, parent, p);
452 	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
453 	/* get access + creation ref */
454 	atomic_set(&uprobe->ref, 2);
455 
456 	return u;
457 }
458 
459 /*
460  * Acquire uprobes_treelock.
461  * Matching uprobe already exists in rbtree;
462  *	increment (access refcount) and return the matching uprobe.
463  *
464  * No matching uprobe; insert the uprobe in rb_tree;
465  *	get a double refcount (access + creation) and return NULL.
466  */
467 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
468 {
469 	struct uprobe *u;
470 
471 	spin_lock(&uprobes_treelock);
472 	u = __insert_uprobe(uprobe);
473 	spin_unlock(&uprobes_treelock);
474 
475 	/* For now assume that the instruction need not be single-stepped */
476 	uprobe->flags |= UPROBE_SKIP_SSTEP;
477 
478 	return u;
479 }
480 
481 static void put_uprobe(struct uprobe *uprobe)
482 {
483 	if (atomic_dec_and_test(&uprobe->ref))
484 		kfree(uprobe);
485 }
486 
487 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
488 {
489 	struct uprobe *uprobe, *cur_uprobe;
490 
491 	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
492 	if (!uprobe)
493 		return NULL;
494 
495 	uprobe->inode = igrab(inode);
496 	uprobe->offset = offset;
497 	init_rwsem(&uprobe->consumer_rwsem);
498 
499 	/* add to uprobes_tree, sorted on inode:offset */
500 	cur_uprobe = insert_uprobe(uprobe);
501 
502 	/* a uprobe exists for this inode:offset combination */
503 	if (cur_uprobe) {
504 		kfree(uprobe);
505 		uprobe = cur_uprobe;
506 		iput(inode);
507 	} else {
508 		atomic_inc(&uprobe_events);
509 	}
510 
511 	return uprobe;
512 }
513 
514 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
515 {
516 	struct uprobe_consumer *uc;
517 
518 	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
519 		return;
520 
521 	down_read(&uprobe->consumer_rwsem);
522 	for (uc = uprobe->consumers; uc; uc = uc->next) {
523 		if (!uc->filter || uc->filter(uc, current))
524 			uc->handler(uc, regs);
525 	}
526 	up_read(&uprobe->consumer_rwsem);
527 }
528 
529 /* Returns the previous consumer */
530 static struct uprobe_consumer *
531 consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
532 {
533 	down_write(&uprobe->consumer_rwsem);
534 	uc->next = uprobe->consumers;
535 	uprobe->consumers = uc;
536 	up_write(&uprobe->consumer_rwsem);
537 
538 	return uc->next;
539 }
540 
541 /*
542  * For uprobe @uprobe, delete the consumer @uc.
543  * Return true if @uc was deleted successfully,
544  * false otherwise.
545  */
546 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
547 {
548 	struct uprobe_consumer **con;
549 	bool ret = false;
550 
551 	down_write(&uprobe->consumer_rwsem);
552 	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
553 		if (*con == uc) {
554 			*con = uc->next;
555 			ret = true;
556 			break;
557 		}
558 	}
559 	up_write(&uprobe->consumer_rwsem);
560 
561 	return ret;
562 }
563 
564 static int
565 __copy_insn(struct address_space *mapping, struct file *filp, char *insn,
566 			unsigned long nbytes, loff_t offset)
567 {
568 	struct page *page;
569 	void *vaddr;
570 	unsigned long off;
571 	pgoff_t idx;
572 
573 	if (!filp)
574 		return -EINVAL;
575 
576 	if (!mapping->a_ops->readpage)
577 		return -EIO;
578 
579 	idx = offset >> PAGE_CACHE_SHIFT;
580 	off = offset & ~PAGE_MASK;
581 
582 	/*
583 	 * Ensure that the page that has the original instruction is
584 	 * populated and in page-cache.
585 	 */
586 	page = read_mapping_page(mapping, idx, filp);
587 	if (IS_ERR(page))
588 		return PTR_ERR(page);
589 
590 	vaddr = kmap_atomic(page);
591 	memcpy(insn, vaddr + off, nbytes);
592 	kunmap_atomic(vaddr);
593 	page_cache_release(page);
594 
595 	return 0;
596 }
597 
598 static int copy_insn(struct uprobe *uprobe, struct file *filp)
599 {
600 	struct address_space *mapping;
601 	unsigned long nbytes;
602 	int bytes;
603 
604 	nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
605 	mapping = uprobe->inode->i_mapping;
606 
607 	/* Instruction at end of binary; copy only available bytes */
608 	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
609 		bytes = uprobe->inode->i_size - uprobe->offset;
610 	else
611 		bytes = MAX_UINSN_BYTES;
612 
613 	/* Instruction at the page-boundary; copy bytes in second page */
614 	if (nbytes < bytes) {
615 		int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
616 				bytes - nbytes, uprobe->offset + nbytes);
617 		if (err)
618 			return err;
619 		bytes = nbytes;
620 	}
621 	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
622 }
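/*
 * Worked example (illustrative numbers, x86 where MAX_UINSN_BYTES is 16):
 * with PAGE_SIZE == 4096 and uprobe->offset ending 6 bytes before a page
 * boundary, nbytes == 6 < bytes == 16, so the trailing 10 bytes are read
 * from the second page first, then the leading 6 from the first page.
 */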
623 
624 /*
625  * How mm->uprobes_state.count gets updated
626  * uprobe_mmap() increments the count if
627  * 	- it successfully adds a breakpoint.
628  * 	- it cannot add a breakpoint, but sees that there is an underlying
629  * 	  breakpoint (via is_swbp_at_addr()).
630  *
631  * uprobe_munmap() decrements the count if
632  * 	- it sees an underlying breakpoint (via is_swbp_at_addr()).
633  * 	  (A subsequent uprobe_unregister wouldn't find the breakpoint
634  * 	  unless a uprobe_mmap kicks in, since the old vma would be
635  * 	  dropped just after uprobe_munmap.)
636  *
637  * uprobe_register increments the count if:
638  * 	- it successfully adds a breakpoint.
639  *
640  * uprobe_unregister decrements the count if:
641  * 	- it sees an underlying breakpoint and removes it successfully
642  * 	  (via is_swbp_at_addr).
643  * 	  (A subsequent uprobe_munmap wouldn't find the breakpoint
644  * 	  since there is no underlying breakpoint after the
645  * 	  breakpoint removal.)
646  */
647 static int
648 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
649 			struct vm_area_struct *vma, unsigned long vaddr)
650 {
651 	bool first_uprobe;
652 	int ret;
653 
654 	/*
655 	 * If the probe is being deleted, the unregistering thread could be
656 	 * done with its vma-rmap walk already. Adding a probe now can be
657 	 * fatal since nobody will be able to clean it up. Also, we could be
658 	 * called from the fork or mremap path, where the probe might have
659 	 * already been inserted. Hence behave as if the probe already existed.
660 	 */
661 	if (!uprobe->consumers)
662 		return 0;
663 
664 	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
665 		ret = copy_insn(uprobe, vma->vm_file);
666 		if (ret)
667 			return ret;
668 
669 		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
670 			return -ENOTSUPP;
671 
672 		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
673 		if (ret)
674 			return ret;
675 
676 		/* write_opcode() assumes we don't cross page boundary */
677 		BUG_ON((uprobe->offset & ~PAGE_MASK) +
678 				UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
679 
680 		uprobe->flags |= UPROBE_COPY_INSN;
681 	}
682 
683 	/*
684 	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
685 	 * the task can hit this breakpoint right after __replace_page().
686 	 */
687 	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
688 	if (first_uprobe)
689 		set_bit(MMF_HAS_UPROBES, &mm->flags);
690 
691 	ret = set_swbp(&uprobe->arch, mm, vaddr);
692 	if (!ret)
693 		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
694 	else if (first_uprobe)
695 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
696 
697 	return ret;
698 }
699 
700 static void
701 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
702 {
703 	/* can happen if uprobe_register() fails */
704 	if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
705 		return;
706 
707 	set_bit(MMF_RECALC_UPROBES, &mm->flags);
708 	set_orig_insn(&uprobe->arch, mm, vaddr);
709 }
710 
711 /*
712  * There could be threads that have already hit the breakpoint. They
713  * will recheck the current insn and restart if find_uprobe() fails.
714  * See find_active_uprobe().
715  */
716 static void delete_uprobe(struct uprobe *uprobe)
717 {
718 	spin_lock(&uprobes_treelock);
719 	rb_erase(&uprobe->rb_node, &uprobes_tree);
720 	spin_unlock(&uprobes_treelock);
721 	iput(uprobe->inode);
722 	put_uprobe(uprobe);
723 	atomic_dec(&uprobe_events);
724 }
725 
726 struct map_info {
727 	struct map_info *next;
728 	struct mm_struct *mm;
729 	unsigned long vaddr;
730 };
731 
732 static inline struct map_info *free_map_info(struct map_info *info)
733 {
734 	struct map_info *next = info->next;
735 	kfree(info);
736 	return next;
737 }
738 
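/*
 * Build the list of mms that currently map @mapping around @offset.
 * Allocations are tried optimistically with GFP_NOWAIT while holding
 * i_mmap_mutex; any that fail are counted in 'more', the lock is
 * dropped, the missing entries are allocated with GFP_KERNEL, and the
 * walk is restarted from scratch.
 */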
739 static struct map_info *
740 build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
741 {
742 	unsigned long pgoff = offset >> PAGE_SHIFT;
743 	struct vm_area_struct *vma;
744 	struct map_info *curr = NULL;
745 	struct map_info *prev = NULL;
746 	struct map_info *info;
747 	int more = 0;
748 
749  again:
750 	mutex_lock(&mapping->i_mmap_mutex);
751 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
752 		if (!valid_vma(vma, is_register))
753 			continue;
754 
755 		if (!prev && !more) {
756 			/*
757 			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
758 			 * reclaim. This is optimistic, no harm done if it fails.
759 			 */
760 			prev = kmalloc(sizeof(struct map_info),
761 					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
762 			if (prev)
763 				prev->next = NULL;
764 		}
765 		if (!prev) {
766 			more++;
767 			continue;
768 		}
769 
770 		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
771 			continue;
772 
773 		info = prev;
774 		prev = prev->next;
775 		info->next = curr;
776 		curr = info;
777 
778 		info->mm = vma->vm_mm;
779 		info->vaddr = offset_to_vaddr(vma, offset);
780 	}
781 	mutex_unlock(&mapping->i_mmap_mutex);
782 
783 	if (!more)
784 		goto out;
785 
786 	prev = curr;
787 	while (curr) {
788 		mmput(curr->mm);
789 		curr = curr->next;
790 	}
791 
792 	do {
793 		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
794 		if (!info) {
795 			curr = ERR_PTR(-ENOMEM);
796 			goto out;
797 		}
798 		info->next = prev;
799 		prev = info;
800 	} while (--more);
801 
802 	goto again;
803  out:
804 	while (prev)
805 		prev = free_map_info(prev);
806 	return curr;
807 }
808 
809 static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
810 {
811 	struct map_info *info;
812 	int err = 0;
813 
814 	info = build_map_info(uprobe->inode->i_mapping,
815 					uprobe->offset, is_register);
816 	if (IS_ERR(info))
817 		return PTR_ERR(info);
818 
819 	while (info) {
820 		struct mm_struct *mm = info->mm;
821 		struct vm_area_struct *vma;
822 
823 		if (err)
824 			goto free;
825 
826 		down_write(&mm->mmap_sem);
827 		vma = find_vma(mm, info->vaddr);
828 		if (!vma || !valid_vma(vma, is_register) ||
829 		    vma->vm_file->f_mapping->host != uprobe->inode)
830 			goto unlock;
831 
832 		if (vma->vm_start > info->vaddr ||
833 		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
834 			goto unlock;
835 
836 		if (is_register)
837 			err = install_breakpoint(uprobe, mm, vma, info->vaddr);
838 		else
839 			remove_breakpoint(uprobe, mm, info->vaddr);
840 
841  unlock:
842 		up_write(&mm->mmap_sem);
843  free:
844 		mmput(mm);
845 		info = free_map_info(info);
846 	}
847 
848 	return err;
849 }
850 
851 static int __uprobe_register(struct uprobe *uprobe)
852 {
853 	return register_for_each_vma(uprobe, true);
854 }
855 
856 static void __uprobe_unregister(struct uprobe *uprobe)
857 {
858 	if (!register_for_each_vma(uprobe, false))
859 		delete_uprobe(uprobe);
860 
861 	/* TODO: can't unregister? schedule a worker thread */
862 }
863 
864 /*
865  * uprobe_register - register a probe
866  * @inode: the file in which the probe has to be placed.
867  * @offset: offset from the start of the file.
868  * @uc: information on how to handle the probe.
869  *
870  * Apart from the access refcount, uprobe_register() takes a creation
871  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
872  * inserted into the rbtree (i.e., the first consumer for an @inode:@offset
873  * tuple). The creation refcount stops uprobe_unregister from freeing the
874  * @uprobe even before the register operation is complete. The creation
875  * refcount is released when the last @uc for the @uprobe
876  * unregisters.
877  *
878  * Return errno if it cannot successfully install probes,
879  * else return 0 (success).
880  */
881 int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
882 {
883 	struct uprobe *uprobe;
884 	int ret;
885 
886 	if (!inode || !uc || uc->next)
887 		return -EINVAL;
888 
889 	if (offset > i_size_read(inode))
890 		return -EINVAL;
891 
892 	ret = 0;
893 	mutex_lock(uprobes_hash(inode));
894 	uprobe = alloc_uprobe(inode, offset);
895 
896 	if (uprobe && !consumer_add(uprobe, uc)) {
897 		ret = __uprobe_register(uprobe);
898 		if (ret) {
899 			uprobe->consumers = NULL;
900 			__uprobe_unregister(uprobe);
901 		} else {
902 			uprobe->flags |= UPROBE_RUN_HANDLER;
903 		}
904 	}
905 
906 	mutex_unlock(uprobes_hash(inode));
907 	if (uprobe)
908 		put_uprobe(uprobe);
909 
910 	return ret;
911 }
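/*
 * Usage sketch (illustrative, not part of this file): an in-kernel user
 * would supply a consumer and register it against an inode:offset pair.
 * The handler name and how @inode/@offset are obtained are assumptions
 * made for the example.
 *
 *	static int example_handler(struct uprobe_consumer *self,
 *				   struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer example_consumer = {
 *		.handler = example_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &example_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &example_consumer);
 */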
912 
913 /*
914  * uprobe_unregister - unregister an already registered probe.
915  * @inode: the file in which the probe has to be removed.
916  * @offset: offset from the start of the file.
917  * @uc: identify which probe if multiple probes are colocated.
918  */
919 void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
920 {
921 	struct uprobe *uprobe;
922 
923 	if (!inode || !uc)
924 		return;
925 
926 	uprobe = find_uprobe(inode, offset);
927 	if (!uprobe)
928 		return;
929 
930 	mutex_lock(uprobes_hash(inode));
931 
932 	if (consumer_del(uprobe, uc)) {
933 		if (!uprobe->consumers) {
934 			__uprobe_unregister(uprobe);
935 			uprobe->flags &= ~UPROBE_RUN_HANDLER;
936 		}
937 	}
938 
939 	mutex_unlock(uprobes_hash(inode));
940 	if (uprobe)
941 		put_uprobe(uprobe);
942 }
943 
944 static struct rb_node *
945 find_node_in_range(struct inode *inode, loff_t min, loff_t max)
946 {
947 	struct rb_node *n = uprobes_tree.rb_node;
948 
949 	while (n) {
950 		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
951 
952 		if (inode < u->inode) {
953 			n = n->rb_left;
954 		} else if (inode > u->inode) {
955 			n = n->rb_right;
956 		} else {
957 			if (max < u->offset)
958 				n = n->rb_left;
959 			else if (min > u->offset)
960 				n = n->rb_right;
961 			else
962 				break;
963 		}
964 	}
965 
966 	return n;
967 }
968 
969 /*
970  * For a given range in vma, build a list of probes that need to be inserted.
971  */
972 static void build_probe_list(struct inode *inode,
973 				struct vm_area_struct *vma,
974 				unsigned long start, unsigned long end,
975 				struct list_head *head)
976 {
977 	loff_t min, max;
978 	struct rb_node *n, *t;
979 	struct uprobe *u;
980 
981 	INIT_LIST_HEAD(head);
982 	min = vaddr_to_offset(vma, start);
983 	max = min + (end - start) - 1;
984 
985 	spin_lock(&uprobes_treelock);
986 	n = find_node_in_range(inode, min, max);
987 	if (n) {
988 		for (t = n; t; t = rb_prev(t)) {
989 			u = rb_entry(t, struct uprobe, rb_node);
990 			if (u->inode != inode || u->offset < min)
991 				break;
992 			list_add(&u->pending_list, head);
993 			atomic_inc(&u->ref);
994 		}
995 		for (t = n; (t = rb_next(t)); ) {
996 			u = rb_entry(t, struct uprobe, rb_node);
997 			if (u->inode != inode || u->offset > max)
998 				break;
999 			list_add(&u->pending_list, head);
1000 			atomic_inc(&u->ref);
1001 		}
1002 	}
1003 	spin_unlock(&uprobes_treelock);
1004 }
1005 
1006 /*
1007  * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1008  *
1009  * Currently we ignore all errors and always return 0, the callers
1010  * can't handle the failure anyway.
1011  */
1012 int uprobe_mmap(struct vm_area_struct *vma)
1013 {
1014 	struct list_head tmp_list;
1015 	struct uprobe *uprobe, *u;
1016 	struct inode *inode;
1017 
1018 	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
1019 		return 0;
1020 
1021 	inode = vma->vm_file->f_mapping->host;
1022 	if (!inode)
1023 		return 0;
1024 
1025 	mutex_lock(uprobes_mmap_hash(inode));
1026 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1027 
1028 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1029 		if (!fatal_signal_pending(current)) {
1030 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1031 			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1032 		}
1033 		put_uprobe(uprobe);
1034 	}
1035 	mutex_unlock(uprobes_mmap_hash(inode));
1036 
1037 	return 0;
1038 }
1039 
1040 static bool
1041 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1042 {
1043 	loff_t min, max;
1044 	struct inode *inode;
1045 	struct rb_node *n;
1046 
1047 	inode = vma->vm_file->f_mapping->host;
1048 
1049 	min = vaddr_to_offset(vma, start);
1050 	max = min + (end - start) - 1;
1051 
1052 	spin_lock(&uprobes_treelock);
1053 	n = find_node_in_range(inode, min, max);
1054 	spin_unlock(&uprobes_treelock);
1055 
1056 	return !!n;
1057 }
1058 
1059 /*
1060  * Called in context of a munmap of a vma.
1061  */
1062 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1063 {
1064 	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
1065 		return;
1066 
1067 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1068 		return;
1069 
1070 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1071 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1072 		return;
1073 
1074 	if (vma_has_uprobes(vma, start, end))
1075 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1076 }
1077 
1078 /* Slot allocation for XOL */
1079 static int xol_add_vma(struct xol_area *area)
1080 {
1081 	struct mm_struct *mm;
1082 	int ret;
1083 
1084 	area->page = alloc_page(GFP_HIGHUSER);
1085 	if (!area->page)
1086 		return -ENOMEM;
1087 
1088 	ret = -EALREADY;
1089 	mm = current->mm;
1090 
1091 	down_write(&mm->mmap_sem);
1092 	if (mm->uprobes_state.xol_area)
1093 		goto fail;
1094 
1095 	ret = -ENOMEM;
1096 
1097 	/* Try to map as high as possible, this is only a hint. */
1098 	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
1099 	if (area->vaddr & ~PAGE_MASK) {
1100 		ret = area->vaddr;
1101 		goto fail;
1102 	}
1103 
1104 	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1105 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
1106 	if (ret)
1107 		goto fail;
1108 
1109 	smp_wmb();	/* pairs with get_xol_area() */
1110 	mm->uprobes_state.xol_area = area;
1111 	ret = 0;
1112 
1113 fail:
1114 	up_write(&mm->mmap_sem);
1115 	if (ret)
1116 		__free_page(area->page);
1117 
1118 	return ret;
1119 }
1120 
1121 static struct xol_area *get_xol_area(struct mm_struct *mm)
1122 {
1123 	struct xol_area *area;
1124 
1125 	area = mm->uprobes_state.xol_area;
1126 	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
1127 
1128 	return area;
1129 }
1130 
1131 /*
1132  * xol_alloc_area - Allocate the process's xol_area.
1133  * This area will be used for storing instructions for execution out of
1134  * line.
1135  *
1136  * Returns the allocated area or NULL.
1137  */
1138 static struct xol_area *xol_alloc_area(void)
1139 {
1140 	struct xol_area *area;
1141 
1142 	area = kzalloc(sizeof(*area), GFP_KERNEL);
1143 	if (unlikely(!area))
1144 		return NULL;
1145 
1146 	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1147 
1148 	if (!area->bitmap)
1149 		goto fail;
1150 
1151 	init_waitqueue_head(&area->wq);
1152 	if (!xol_add_vma(area))
1153 		return area;
1154 
1155 fail:
1156 	kfree(area->bitmap);
1157 	kfree(area);
1158 
1159 	return get_xol_area(current->mm);
1160 }
1161 
1162 /*
1163  * uprobe_clear_state - Free the area allocated for slots.
1164  */
1165 void uprobe_clear_state(struct mm_struct *mm)
1166 {
1167 	struct xol_area *area = mm->uprobes_state.xol_area;
1168 
1169 	if (!area)
1170 		return;
1171 
1172 	put_page(area->page);
1173 	kfree(area->bitmap);
1174 	kfree(area);
1175 }
1176 
1177 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1178 {
1179 	newmm->uprobes_state.xol_area = NULL;
1180 
1181 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1182 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
1183 		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1184 		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1185 	}
1186 }
1187 
1188 /*
1189  * xol_take_insn_slot - search for a free slot, waiting until one is available.
1190  */
1191 static unsigned long xol_take_insn_slot(struct xol_area *area)
1192 {
1193 	unsigned long slot_addr;
1194 	int slot_nr;
1195 
1196 	do {
1197 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1198 		if (slot_nr < UINSNS_PER_PAGE) {
1199 			if (!test_and_set_bit(slot_nr, area->bitmap))
1200 				break;
1201 
1202 			slot_nr = UINSNS_PER_PAGE;
1203 			continue;
1204 		}
1205 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1206 	} while (slot_nr >= UINSNS_PER_PAGE);
1207 
1208 	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1209 	atomic_inc(&area->slot_count);
1210 
1211 	return slot_addr;
1212 }
1213 
1214 /*
1215  * xol_get_insn_slot - If a slot was not already allocated, then
1216  * allocate one and copy the probed instruction into it.
1217  * Returns the allocated slot address or 0.
1218  */
1219 static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
1220 {
1221 	struct xol_area *area;
1222 	unsigned long offset;
1223 	void *vaddr;
1224 
1225 	area = get_xol_area(current->mm);
1226 	if (!area) {
1227 		area = xol_alloc_area();
1228 		if (!area)
1229 			return 0;
1230 	}
1231 	current->utask->xol_vaddr = xol_take_insn_slot(area);
1232 
1233 	/*
1234 	 * Initialize the slot if xol_vaddr points to a valid
1235 	 * instruction slot.
1236 	 */
1237 	if (unlikely(!current->utask->xol_vaddr))
1238 		return 0;
1239 
1240 	current->utask->vaddr = slot_addr;
1241 	offset = current->utask->xol_vaddr & ~PAGE_MASK;
1242 	vaddr = kmap_atomic(area->page);
1243 	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
1244 	kunmap_atomic(vaddr);
1245 
1246 	return current->utask->xol_vaddr;
1247 }
1248 
1249 /*
1250  * xol_free_insn_slot - If a slot was earlier allocated by
1251  * @xol_get_insn_slot(), make the slot available for
1252  * subsequent requests.
1253  */
1254 static void xol_free_insn_slot(struct task_struct *tsk)
1255 {
1256 	struct xol_area *area;
1257 	unsigned long vma_end;
1258 	unsigned long slot_addr;
1259 
1260 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1261 		return;
1262 
1263 	slot_addr = tsk->utask->xol_vaddr;
1264 
1265 	if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
1266 		return;
1267 
1268 	area = tsk->mm->uprobes_state.xol_area;
1269 	vma_end = area->vaddr + PAGE_SIZE;
1270 	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1271 		unsigned long offset;
1272 		int slot_nr;
1273 
1274 		offset = slot_addr - area->vaddr;
1275 		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1276 		if (slot_nr >= UINSNS_PER_PAGE)
1277 			return;
1278 
1279 		clear_bit(slot_nr, area->bitmap);
1280 		atomic_dec(&area->slot_count);
1281 		if (waitqueue_active(&area->wq))
1282 			wake_up(&area->wq);
1283 
1284 		tsk->utask->xol_vaddr = 0;
1285 	}
1286 }
1287 
1288 /**
1289  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1290  * @regs: Reflects the saved state of the task after it has hit a breakpoint
1291  * instruction.
1292  * Return the address of the breakpoint instruction.
1293  */
1294 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1295 {
1296 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1297 }
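/*
 * On x86, for example, the int3 trap reports an instruction pointer just
 * past the one-byte breakpoint, so subtracting UPROBE_SWBP_INSN_SIZE
 * recovers the address the probe was placed at.
 */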
1298 
1299 /*
1300  * Called with no locks held.
1301  * Called in the context of an exiting or an exec-ing thread.
1302  */
1303 void uprobe_free_utask(struct task_struct *t)
1304 {
1305 	struct uprobe_task *utask = t->utask;
1306 
1307 	if (!utask)
1308 		return;
1309 
1310 	if (utask->active_uprobe)
1311 		put_uprobe(utask->active_uprobe);
1312 
1313 	xol_free_insn_slot(t);
1314 	kfree(utask);
1315 	t->utask = NULL;
1316 }
1317 
1318 /*
1319  * Called in context of a new clone/fork from copy_process.
1320  */
1321 void uprobe_copy_process(struct task_struct *t)
1322 {
1323 	t->utask = NULL;
1324 }
1325 
1326 /*
1327  * Allocate a uprobe_task object for the task.
1328  * Called when the thread hits a breakpoint for the first time.
1329  *
1330  * Returns:
1331  * - pointer to new uprobe_task on success
1332  * - NULL otherwise
1333  */
1334 static struct uprobe_task *add_utask(void)
1335 {
1336 	struct uprobe_task *utask;
1337 
1338 	utask = kzalloc(sizeof *utask, GFP_KERNEL);
1339 	if (unlikely(!utask))
1340 		return NULL;
1341 
1342 	current->utask = utask;
1343 	return utask;
1344 }
1345 
1346 /* Prepare to single-step probed instruction out of line. */
1347 static int
1348 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
1349 {
1350 	if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
1351 		return 0;
1352 
1353 	return -EFAULT;
1354 }
1355 
1356 /*
1357  * If we are singlestepping, then ensure this thread is not connected to
1358  * non-fatal signals until completion of singlestep. When the xol insn
1359  * itself triggers the signal, restart the original insn even if the task
1360  * is already SIGKILL'ed (since coredump should report the correct ip).
1361  * This is even more important if the task has a handler for SIGSEGV/etc:
1362  * the _same_ instruction should be repeated again after return from the
1363  * signal handler, and SSTEP can never finish in this case.
1364  */
1365 bool uprobe_deny_signal(void)
1366 {
1367 	struct task_struct *t = current;
1368 	struct uprobe_task *utask = t->utask;
1369 
1370 	if (likely(!utask || !utask->active_uprobe))
1371 		return false;
1372 
1373 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1374 
1375 	if (signal_pending(t)) {
1376 		spin_lock_irq(&t->sighand->siglock);
1377 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
1378 		spin_unlock_irq(&t->sighand->siglock);
1379 
1380 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1381 			utask->state = UTASK_SSTEP_TRAPPED;
1382 			set_tsk_thread_flag(t, TIF_UPROBE);
1383 			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1384 		}
1385 	}
1386 
1387 	return true;
1388 }
1389 
1390 /*
1391  * Avoid singlestepping the original instruction if the original instruction
1392  * is a NOP or can be emulated.
1393  */
1394 static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
1395 {
1396 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1397 		return true;
1398 
1399 	uprobe->flags &= ~UPROBE_SKIP_SSTEP;
1400 	return false;
1401 }
1402 
1403 static void mmf_recalc_uprobes(struct mm_struct *mm)
1404 {
1405 	struct vm_area_struct *vma;
1406 
1407 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1408 		if (!valid_vma(vma, false))
1409 			continue;
1410 		/*
1411 		 * This is not strictly accurate, we can race with
1412 		 * uprobe_unregister() and see the already removed
1413 		 * uprobe if delete_uprobe() was not yet called.
1414 		 */
1415 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1416 			return;
1417 	}
1418 
1419 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
1420 }
1421 
1422 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
1423 {
1424 	struct mm_struct *mm = current->mm;
1425 	struct uprobe *uprobe = NULL;
1426 	struct vm_area_struct *vma;
1427 
1428 	down_read(&mm->mmap_sem);
1429 	vma = find_vma(mm, bp_vaddr);
1430 	if (vma && vma->vm_start <= bp_vaddr) {
1431 		if (valid_vma(vma, false)) {
1432 			struct inode *inode = vma->vm_file->f_mapping->host;
1433 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
1434 
1435 			uprobe = find_uprobe(inode, offset);
1436 		}
1437 
1438 		if (!uprobe)
1439 			*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
1440 	} else {
1441 		*is_swbp = -EFAULT;
1442 	}
1443 
1444 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1445 		mmf_recalc_uprobes(mm);
1446 	up_read(&mm->mmap_sem);
1447 
1448 	return uprobe;
1449 }
1450 
1451 void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
1452 {
1453 	user_enable_single_step(current);
1454 }
1455 
1456 void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
1457 {
1458 	user_disable_single_step(current);
1459 }
1460 
1461 /*
1462  * Run handler and ask thread to singlestep.
1463  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1464  */
1465 static void handle_swbp(struct pt_regs *regs)
1466 {
1467 	struct uprobe_task *utask;
1468 	struct uprobe *uprobe;
1469 	unsigned long bp_vaddr;
1470 	int uninitialized_var(is_swbp);
1471 
1472 	bp_vaddr = uprobe_get_swbp_addr(regs);
1473 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
1474 
1475 	if (!uprobe) {
1476 		if (is_swbp > 0) {
1477 			/* No matching uprobe; signal SIGTRAP. */
1478 			send_sig(SIGTRAP, current, 0);
1479 		} else {
1480 			/*
1481 			 * Either we raced with uprobe_unregister() or we can't
1482 			 * access this memory. The latter is only possible if
1483 			 * another thread plays with our ->mm. In both cases
1484 			 * we can simply restart. If this vma was unmapped we
1485 			 * can pretend this insn was not executed yet and get
1486 			 * the (correct) SIGSEGV after restart.
1487 			 */
1488 			instruction_pointer_set(regs, bp_vaddr);
1489 		}
1490 		return;
1491 	}
1492 
1493 	utask = current->utask;
1494 	if (!utask) {
1495 		utask = add_utask();
1496 		/* Cannot allocate; re-execute the instruction. */
1497 		if (!utask)
1498 			goto cleanup_ret;
1499 	}
1500 	utask->active_uprobe = uprobe;
1501 	handler_chain(uprobe, regs);
1502 	if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
1503 		goto cleanup_ret;
1504 
1505 	utask->state = UTASK_SSTEP;
1506 	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
1507 		arch_uprobe_enable_step(&uprobe->arch);
1508 		return;
1509 	}
1510 
1511 cleanup_ret:
1512 	if (utask) {
1513 		utask->active_uprobe = NULL;
1514 		utask->state = UTASK_RUNNING;
1515 	}
1516 	if (!(uprobe->flags & UPROBE_SKIP_SSTEP)) {
1517 		/*
1518 		 * cannot singlestep; cannot skip instruction;
1519 		 * re-execute the instruction.
1520 		 */
1521 		instruction_pointer_set(regs, bp_vaddr);
1522 	}
1523 
1524 	put_uprobe(uprobe);
1525 }
1526 
1527 /*
1528  * Perform required fix-ups and disable singlestep.
1529  * Allow pending signals to take effect.
1530  */
1531 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1532 {
1533 	struct uprobe *uprobe;
1534 
1535 	uprobe = utask->active_uprobe;
1536 	if (utask->state == UTASK_SSTEP_ACK)
1537 		arch_uprobe_post_xol(&uprobe->arch, regs);
1538 	else if (utask->state == UTASK_SSTEP_TRAPPED)
1539 		arch_uprobe_abort_xol(&uprobe->arch, regs);
1540 	else
1541 		WARN_ON_ONCE(1);
1542 
1543 	arch_uprobe_disable_step(&uprobe->arch);
1544 	put_uprobe(uprobe);
1545 	utask->active_uprobe = NULL;
1546 	utask->state = UTASK_RUNNING;
1547 	xol_free_insn_slot(current);
1548 
1549 	spin_lock_irq(&current->sighand->siglock);
1550 	recalc_sigpending(); /* see uprobe_deny_signal() */
1551 	spin_unlock_irq(&current->sighand->siglock);
1552 }
1553 
1554 /*
1555  * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag
1556  * (and, on subsequent probe hits on the thread, sets the state to
1557  * UTASK_BP_HIT) and allows the thread to return from the interrupt.
1558  *
1559  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag and
1560  * also sets the state to UTASK_SSTEP_ACK and allows the thread to return from
1561  * interrupt.
1562  *
1563  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1564  * uprobe_notify_resume().
1565  */
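/*
 * Informal state sketch, as implemented by the notifiers and handlers
 * above and below:
 *
 *	UTASK_RUNNING --breakpoint--> UTASK_BP_HIT --handle_swbp-->
 *	UTASK_SSTEP --singlestep trap--> UTASK_SSTEP_ACK (or
 *	UTASK_SSTEP_TRAPPED via uprobe_deny_signal) --handle_singlestep-->
 *	UTASK_RUNNING
 */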
1566 void uprobe_notify_resume(struct pt_regs *regs)
1567 {
1568 	struct uprobe_task *utask;
1569 
1570 	utask = current->utask;
1571 	if (!utask || utask->state == UTASK_BP_HIT)
1572 		handle_swbp(regs);
1573 	else
1574 		handle_singlestep(utask, regs);
1575 }
1576 
1577 /*
1578  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
1579  * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
1580  */
1581 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1582 {
1583 	struct uprobe_task *utask;
1584 
1585 	if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
1586 		return 0;
1587 
1588 	utask = current->utask;
1589 	if (utask)
1590 		utask->state = UTASK_BP_HIT;
1591 
1592 	set_thread_flag(TIF_UPROBE);
1593 
1594 	return 1;
1595 }
1596 
1597 /*
1598  * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
1599  * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
1600  */
1601 int uprobe_post_sstep_notifier(struct pt_regs *regs)
1602 {
1603 	struct uprobe_task *utask = current->utask;
1604 
1605 	if (!current->mm || !utask || !utask->active_uprobe)
1606 		/* task is currently not uprobed */
1607 		return 0;
1608 
1609 	utask->state = UTASK_SSTEP_ACK;
1610 	set_thread_flag(TIF_UPROBE);
1611 	return 1;
1612 }
1613 
1614 static struct notifier_block uprobe_exception_nb = {
1615 	.notifier_call		= arch_uprobe_exception_notify,
1616 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
1617 };
1618 
1619 static int __init init_uprobes(void)
1620 {
1621 	int i;
1622 
1623 	for (i = 0; i < UPROBES_HASH_SZ; i++) {
1624 		mutex_init(&uprobes_mutex[i]);
1625 		mutex_init(&uprobes_mmap_mutex[i]);
1626 	}
1627 
1628 	return register_die_notifier(&uprobe_exception_nb);
1629 }
1630 module_init(init_uprobes);
1631 
1632 static void __exit exit_uprobes(void)
1633 {
1634 }
1635 module_exit(exit_uprobes);
1636