// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>
#include <linux/rcupdate_trace.h>
#include <linux/workqueue.h>
#include <linux/srcu.h>
#include <linux/oom.h>          /* check_stable_address_space */

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
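/*
 * Illustrative arithmetic (values are arch-dependent): with a 4K PAGE_SIZE
 * and an architecture defining UPROBE_XOL_SLOT_BYTES as 128, UINSNS_PER_PAGE
 * works out to 4096 / 128 = 32 XOL slots per page.
 */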

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time. Perhaps a fine-grained per-inode count would be better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Covers return_instance's uprobe lifetime. */
DEFINE_STATIC_SRCU(uretprobes_srcu);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct list_head	consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	union {
		struct rcu_head		rcu;
		struct work_struct	work;
	};
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;		/* "unsigned long" so bitops work */

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 * 	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, a thread contends for a slot.  It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t 		wq;		/* if all slots are busy */
	unsigned long 			*bitmap;	/* 0 = free slot */

	struct page			*page;
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long 			vaddr;		/* Page(s) of instruction slots */
};

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
}
/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Returns true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
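
/*
 * Worked example (illustrative values only): for a vma with
 * vm_start == 0x400000 and vm_pgoff == 2, a file offset of 0x3010 maps to
 * 0x400000 + 0x3010 - (2 << PAGE_SHIFT) = 0x401010 (with 4K pages);
 * vaddr_to_offset() is the exact inverse of this computation.
 */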

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page we replace page by
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_folio));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at(mm, addr, pvmw.pte,
			   mk_pte(new_page, vma->vm_page_prot));

	folio_remove_rmap_pte(old_folio, old_page, vma);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

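/*
 * Returns 1 if writing @new_opcode is still needed, 0 if the page already
 * holds the desired state (breakpoint already installed on register, or
 * already restored on unregister); uprobe_write_opcode() skips the page
 * replacement when this returns <= 0.
 */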
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du  = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0; in that case we have to return an error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

454 
455 /*
456  * NOTE:
457  * Expect the breakpoint instruction to be the smallest size instruction for
458  * the architecture. If an arch has variable length instruction and the
459  * breakpoint instruction is not of the smallest length instruction
460  * supported by that architecture then we need to modify is_trap_at_addr and
461  * uprobe_write_opcode accordingly. This would never be a problem for archs
462  * that have fixed length instructions.
463  *
464  * uprobe_write_opcode - write the opcode at a given virtual address.
465  * @auprobe: arch specific probepoint information.
466  * @mm: the probed process address space.
467  * @vaddr: the virtual address to store the opcode.
468  * @opcode: opcode to be written at @vaddr.
469  *
470  * Called with mm->mmap_lock held for read or write.
471  * Return 0 (success) or a negative errno.
472  */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
	if (IS_ERR(old_page))
		return PTR_ERR(old_page);

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace the instruction, so update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert the reference counter if the instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

/* uprobe should have guaranteed positive refcount */
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

/*
 * uprobe should have guaranteed lifetime, which can be either of:
 *   - caller already has refcount taken (and wants an extra one);
 *   - uprobe is RCU protected and won't be freed until after grace period;
 *   - we are holding uprobes_treelock (for read or write, doesn't matter).
 */
static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
{
	if (refcount_inc_not_zero(&uprobe->ref))
		return uprobe;
	return NULL;
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	kfree(uprobe);
}

static void uprobe_free_srcu(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace);
}

static void uprobe_free_deferred(struct work_struct *work)
{
	struct uprobe *uprobe = container_of(work, struct uprobe, work);

	write_lock(&uprobes_treelock);

	if (uprobe_is_active(uprobe)) {
		write_seqcount_begin(&uprobes_seqcount);
		rb_erase(&uprobe->rb_node, &uprobes_tree);
		write_seqcount_end(&uprobes_seqcount);
	}

	write_unlock(&uprobes_treelock);

	/*
	 * If the application does munmap(exec_vma) before uprobe_unregister()
	 * is called, we don't get a chance to remove the uprobe from
	 * delayed_uprobe_list via remove_breakpoint(). Do it here.
	 */
	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(uprobe, NULL);
	mutex_unlock(&delayed_uprobe_lock);

	/* start srcu -> rcu_tasks_trace -> kfree chain */
	call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu);
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (!refcount_dec_and_test(&uprobe->ref))
		return;

	INIT_WORK(&uprobe->work, uprobe_free_deferred);
	schedule_work(&uprobe->work);
}

/* Initialize hprobe as SRCU-protected "leased" uprobe */
static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx)
{
	WARN_ON(!uprobe);
	hprobe->state = HPROBE_LEASED;
	hprobe->uprobe = uprobe;
	hprobe->srcu_idx = srcu_idx;
}

/* Initialize hprobe as refcounted ("stable") uprobe (uprobe can be NULL). */
static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe)
{
	hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE;
	hprobe->uprobe = uprobe;
	hprobe->srcu_idx = -1;
}

/*
 * hprobe_consume() fetches hprobe's underlying uprobe and detects whether
 * uprobe is SRCU protected or is refcounted. hprobe_consume() can be
 * used only once for a given hprobe.
 *
 * Caller has to call hprobe_finalize() and pass previous hprobe_state, so
 * that hprobe_finalize() can perform SRCU unlock or put uprobe, whichever
 * is appropriate.
 */
static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate)
{
	*hstate = xchg(&hprobe->state, HPROBE_CONSUMED);
	switch (*hstate) {
	case HPROBE_LEASED:
	case HPROBE_STABLE:
		return hprobe->uprobe;
	case HPROBE_GONE:	/* uprobe is NULL, no SRCU */
	case HPROBE_CONSUMED:	/* uprobe was finalized already, do nothing */
		return NULL;
	default:
		WARN(1, "hprobe invalid state %d", *hstate);
		return NULL;
	}
}

/*
 * Reset hprobe state and, if hprobe was LEASED, release SRCU lock.
 * hprobe_finalize() can only be used from current context after
 * hprobe_consume() call (which determines uprobe and hstate value).
 */
static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate)
{
	switch (hstate) {
	case HPROBE_LEASED:
		__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
		break;
	case HPROBE_STABLE:
		put_uprobe(hprobe->uprobe);
		break;
	case HPROBE_GONE:
	case HPROBE_CONSUMED:
		break;
	default:
		WARN(1, "hprobe invalid state %d", hstate);
		break;
	}
}
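
/*
 * Typical consume/finalize pairing, as done in free_ret_instance() below:
 *
 *	enum hprobe_state hstate;
 *	struct uprobe *uprobe;
 *
 *	uprobe = hprobe_consume(&ri->hprobe, &hstate);
 *	... uprobe (possibly NULL) stays valid until finalization ...
 *	hprobe_finalize(&ri->hprobe, hstate);
 */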

/*
 * Attempt to switch (atomically) uprobe from being SRCU protected (LEASED)
 * to refcounted (STABLE) state. Competes with hprobe_consume(); only one of
 * them can win the race to perform SRCU unlocking. Whoever wins must perform
 * SRCU unlock.
 *
 * Returns underlying valid uprobe or NULL, if there was no underlying uprobe
 * to begin with or we failed to bump its refcount and it's going away.
 *
 * Returned non-NULL uprobe can be still safely used within an ongoing SRCU
 * locked region. If `get` is true, it's guaranteed that non-NULL uprobe has
 * an extra refcount for caller to assume and use. Otherwise, it's not
 * guaranteed that returned uprobe has a positive refcount, so caller has to
 * attempt try_get_uprobe(), if it needs to preserve uprobe beyond current
 * SRCU lock region. See dup_utask().
 */
static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
{
	enum hprobe_state hstate;

	/*
	 * return_instance's hprobe is protected by RCU.
	 * Underlying uprobe is itself protected from reuse by SRCU.
	 */
	lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu));

	hstate = READ_ONCE(hprobe->state);
	switch (hstate) {
	case HPROBE_STABLE:
		/* uprobe has positive refcount, bump refcount, if necessary */
		return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe;
	case HPROBE_GONE:
		/*
		 * SRCU was unlocked earlier and we didn't manage to take
		 * uprobe refcnt, so it's effectively NULL
		 */
		return NULL;
	case HPROBE_CONSUMED:
		/*
		 * uprobe was consumed, so it's effectively NULL as far as
		 * uretprobe processing logic is concerned
		 */
		return NULL;
	case HPROBE_LEASED: {
		struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe);
		/*
		 * Try to switch hprobe state, guarding against
		 * hprobe_consume() or another hprobe_expire() racing with us.
		 * Note, if we failed to get uprobe refcount, we use special
		 * HPROBE_GONE state to signal that hprobe->uprobe shouldn't
		 * be used as it will be freed after SRCU is unlocked.
		 */
		if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) {
			/* We won the race, we are the ones to unlock SRCU */
			__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
			return get ? get_uprobe(uprobe) : uprobe;
		}

		/*
		 * We lost the race, undo refcount bump (if it ever happened),
		 * unless caller would like an extra refcount anyways.
		 */
		if (uprobe && !get)
			put_uprobe(uprobe);
		/*
		 * Even if hprobe_consume() or another hprobe_expire() wins
		 * the state update race and unlocks SRCU from under us, we
		 * still have a guarantee that the underlying uprobe won't be
		 * freed due to ongoing caller's SRCU lock region, so we can
		 * return it regardless. Also, if `get` was true, we also have
		 * an extra ref for the caller to own. This is used in dup_utask().
		 */
		return uprobe;
	}
	default:
		WARN(1, "unknown hprobe state %d", hstate);
		return NULL;
	}
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

/*
 * Assumes being inside RCU protected region.
 * No refcount is taken on returned uprobe.
 */
static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node;
	unsigned int seq;

	lockdep_assert(rcu_read_lock_trace_held());

	do {
		seq = read_seqcount_begin(&uprobes_seqcount);
		node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
		/*
		 * Lockless RB-tree lookups can result only in false negatives.
		 * If the element is found, it is correct and can be returned
		 * under RCU protection. If we find nothing, we need to
		 * validate that seqcount didn't change. If it did, we have to
		 * try again as we might have missed the element (false
		 * negative). If seqcount is unchanged, search truly failed.
		 */
		if (node)
			return __node_2_uprobe(node);
	} while (read_seqcount_retry(&uprobes_seqcount, seq));

	return NULL;
}

/*
 * Attempt to insert a new uprobe into uprobes_tree.
 *
 * If uprobe already exists (for given inode+offset), we just increment
 * refcount of previously existing uprobe.
 *
 * If not, a provided new instance of uprobe is inserted into the tree (with
 * assumed initial refcount == 1).
 *
 * In any case, we return a uprobe instance that ends up being in uprobes_tree.
 * Caller has to clean up new uprobe instance, if it ended up not being
 * inserted into the tree.
 *
 * We assume that uprobes_treelock is held for writing.
 */
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;
again:
	node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node) {
		struct uprobe *u = __node_2_uprobe(node);

		if (!try_get_uprobe(u)) {
			rb_erase(node, &uprobes_tree);
			RB_CLEAR_NODE(&u->rb_node);
			goto again;
		}

		return u;
	}

	return uprobe;
}

/*
 * Acquire uprobes_treelock and insert uprobe into uprobes_tree
 * (or reuse existing one, see __insert_uprobe() comments above).
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	write_lock(&uprobes_treelock);
	write_seqcount_begin(&uprobes_seqcount);
	u = __insert_uprobe(uprobe);
	write_seqcount_end(&uprobes_seqcount);
	write_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return ERR_PTR(-ENOMEM);

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	INIT_LIST_HEAD(&uprobe->consumers);
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);
	RB_CLEAR_NODE(&uprobe->rb_node);
	refcount_set(&uprobe->ref, 1);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe != uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	static atomic64_t id;

	down_write(&uprobe->consumer_rwsem);
	list_add_rcu(&uc->cons_node, &uprobe->consumers);
	uc->id = (__u64) atomic64_inc_return(&id);
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Should never be called with consumer that's not part of @uprobe->consumers.
 */
static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_del_rcu(&uc->cons_node);
	up_write(&uprobe->consumer_rwsem);
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, mm);
}

static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		ret = consumer_filter(uc, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;
		/*
		 * We take mmap_lock for writing to avoid the race with
		 * find_active_uprobe_rcu() which takes mmap_lock for reading.
		 * Thus this install_breakpoint() can not make
		 * is_trap_at_addr() true right after find_uprobe_rcu()
		 * returns NULL in find_active_uprobe_rcu().
		 */
		mmap_write_lock(mm);
		if (check_stable_address_space(mm))
			goto unlock;

		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

/**
 * uprobe_unregister_nosync - unregister an already registered probe.
 * @uprobe: uprobe to remove
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	down_write(&uprobe->register_rwsem);
	consumer_del(uprobe, uc);
	err = register_for_each_vma(uprobe, NULL);
	up_write(&uprobe->register_rwsem);

	/* TODO: can't unregister? schedule a worker thread */
	if (unlikely(err)) {
		uprobe_warn(current, "unregister, leaking uprobe");
		return;
	}

	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);

void uprobe_unregister_sync(void)
{
	/*
	 * Now that handler_chain() and handle_uretprobe_chain() iterate over
	 * uprobe->consumers list under RCU protection without holding
	 * uprobe->register_rwsem, we need to wait for RCU grace period to
	 * make sure that we can't call into just unregistered
	 * uprobe_consumer's callbacks anymore. If we don't do that, a fast and
	 * unlucky enough caller can free the consumer's memory and cause
	 * handler_chain() or handle_uretprobe_chain() to do a use-after-free.
	 */
	synchronize_rcu_tasks_trace();
	synchronize_srcu(&uretprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);

/**
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @ref_ctr_offset: offset of SDT marker / reference counter
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. the first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return: pointer to the new uprobe on success or an ERR_PTR on failure.
 */
struct uprobe *uprobe_register(struct inode *inode,
				loff_t offset, loff_t ref_ctr_offset,
				struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return ERR_PTR(-EINVAL);

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->read_folio &&
	    !shmem_mapping(inode->i_mapping))
		return ERR_PTR(-EIO);
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return ERR_PTR(-EINVAL);

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return ERR_PTR(-EINVAL);
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return ERR_PTR(-EINVAL);

	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (IS_ERR(uprobe))
		return uprobe;

	down_write(&uprobe->register_rwsem);
	consumer_add(uprobe, uc);
	ret = register_for_each_vma(uprobe, uc);
	up_write(&uprobe->register_rwsem);

	if (ret) {
		uprobe_unregister_nosync(uprobe, uc);
		/*
		 * Registration might have partially succeeded, so we can have
		 * this consumer being called right at this time. We need to
		 * sync here. That's OK; this is an unlikely, slow path.
		 */
		uprobe_unregister_sync();
		return ERR_PTR(ret);
	}

	return uprobe;
}
EXPORT_SYMBOL_GPL(uprobe_register);
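
/*
 * A minimal usage sketch (hypothetical consumer, not taken from this file):
 *
 *	static struct uprobe_consumer uc = { .handler = my_handler };
 *	struct uprobe *u = uprobe_register(inode, offset, 0, &uc);
 *
 *	if (IS_ERR(u))
 *		return PTR_ERR(u);
 *	...
 *	uprobe_unregister_nosync(u, &uc);
 *	uprobe_unregister_sync();
 *
 * The final sync call waits out the RCU/SRCU grace periods so that no
 * in-flight handler can still touch the consumer's memory.
 */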

/**
 * uprobe_apply - add or remove the breakpoints according to @uc->filter
 * @uprobe: uprobe which "owns" the breakpoint
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 * Return: 0 on success or negative error code.
 */
int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	down_write(&uprobe->register_rwsem);

	rcu_read_lock_trace();
	list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		if (con == uc) {
			ret = register_for_each_vma(uprobe, add ? uc : NULL);
			break;
		}
	}
	rcu_read_unlock_trace();

	up_write(&uprobe->register_rwsem);

	return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset <  offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
	}
	read_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;
	unsigned long vaddr;
	int ret = 0, err = 0;

	mutex_lock(&delayed_uprobe_lock);
	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (du->mm != vma->vm_mm ||
		    !valid_ref_ctr_vma(du->uprobe, vma))
			continue;

		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
		if (ret) {
			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
			if (!err)
				err = ret;
		}
		delayed_uprobe_delete(du);
	}
	mutex_unlock(&delayed_uprobe_lock);
	return err;
}

/*
 * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0; the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events())
		return 0;

	if (vma->vm_file &&
	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
		delayed_ref_ctr_inc(vma);

	if (!valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	read_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
			    struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;

	vmf->page = area->page;
	get_page(vmf->page);
	return 0;
}

static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return -EPERM;
}

static const struct vm_special_mapping xol_mapping = {
	.name = "[uprobes]",
	.fault = xol_fault,
	.mremap = xol_mremap,
};

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(area->vaddr)) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
 fail:
	mmap_write_unlock(mm);

	return ret;
}

void * __weak arch_uprobe_trampoline(unsigned long *psize)
{
	static uprobe_opcode_t insn = UPROBE_SWBP_INSN;

	*psize = UPROBE_SWBP_INSN_SIZE;
	return &insn;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long insns_size;
	struct xol_area *area;
	void *insns;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
	if (!area->page)
		goto free_bitmap;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	insns = arch_uprobe_trampoline(&insns_size);
	arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->page);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(NULL, mm);
	mutex_unlock(&delayed_uprobe_lock);

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

static unsigned long xol_get_slot_nr(struct xol_area *area)
{
	unsigned long slot_nr;

	slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
	if (slot_nr < UINSNS_PER_PAGE) {
		if (!test_and_set_bit(slot_nr, area->bitmap))
			return slot_nr;
	}

	return UINSNS_PER_PAGE;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 */
static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask)
{
	struct xol_area *area = get_xol_area();
	unsigned long slot_nr;

	if (!area)
		return false;

	wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE);

	utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
	arch_uprobe_copy_ixol(area->page, utask->xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
	return true;
}

/*
 * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot()
 */
static void xol_free_insn_slot(struct uprobe_task *utask)
{
	struct xol_area *area = current->mm->uprobes_state.xol_area;
	unsigned long offset = utask->xol_vaddr - area->vaddr;
	unsigned int slot_nr;

	utask->xol_vaddr = 0;
	/* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */
	if (WARN_ON_ONCE(offset >= PAGE_SIZE))
		return;

	slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
	clear_bit(slot_nr, area->bitmap);
	smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
	if (waitqueue_active(&area->wq))
		wake_up(&area->wq);
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

1865 	/*
1866 	 * We probably need flush_icache_user_page() but it needs vma.
1867 	 * This should work on most architectures by default. If an
1868 	 * architecture needs to do something different it can define
1869 	 * its own version of the function.
1870 	 */
1871 	flush_dcache_page(page);
1872 }
1873 
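/*
 * Illustrative sketch only, not any real architecture's implementation:
 * an arch with a non-coherent icache would typically override the weak
 * helper above to copy the instruction and then perform its own icache
 * maintenance on the slot, along these lines:
 *
 *	void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 *				   void *src, unsigned long len)
 *	{
 *		void *dst = kmap_local_page(page);
 *
 *		memcpy(dst + (vaddr & ~PAGE_MASK), src, len);
 *		kunmap_local(dst);
 *		...arch-specific icache flush of the slot goes here...
 *	}
 */
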
1874 /**
1875  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1876  * @regs: Reflects the saved state of the task after it has hit a breakpoint
1877  * instruction.
1878  * Return the address of the breakpoint instruction.
1879  */
1880 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1881 {
1882 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1883 }
1884 
1885 unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1886 {
1887 	struct uprobe_task *utask = current->utask;
1888 
1889 	if (unlikely(utask && utask->active_uprobe))
1890 		return utask->vaddr;
1891 
1892 	return instruction_pointer(regs);
1893 }
1894 
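/*
 * utask->ri_pool is a per-task cache of return_instance objects: a
 * singly-linked LIFO threaded through ri->next. Reusing instances lets
 * the hot uretprobe entry/exit path avoid kzalloc()/kfree_rcu() churn;
 * see free_ret_instance() for when reuse is safe.
 */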
1895 static void ri_pool_push(struct uprobe_task *utask, struct return_instance *ri)
1896 {
1897 	ri->cons_cnt = 0;
1898 	ri->next = utask->ri_pool;
1899 	utask->ri_pool = ri;
1900 }
1901 
1902 static struct return_instance *ri_pool_pop(struct uprobe_task *utask)
1903 {
1904 	struct return_instance *ri = utask->ri_pool;
1905 
1906 	if (likely(ri))
1907 		utask->ri_pool = ri->next;
1908 
1909 	return ri;
1910 }
1911 
1912 static void ri_free(struct return_instance *ri)
1913 {
1914 	kfree(ri->extra_consumers);
1915 	kfree_rcu(ri, rcu);
1916 }
1917 
1918 static void free_ret_instance(struct uprobe_task *utask,
1919 			      struct return_instance *ri, bool cleanup_hprobe)
1920 {
1921 	unsigned seq;
1922 
1923 	if (cleanup_hprobe) {
1924 		enum hprobe_state hstate;
1925 
1926 		(void)hprobe_consume(&ri->hprobe, &hstate);
1927 		hprobe_finalize(&ri->hprobe, hstate);
1928 	}
1929 
1930 	/*
1931 	 * At this point return_instance is unlinked from utask's
1932 	 * return_instances list and this has become visible to ri_timer().
1933 	 * If seqcount now indicates that ri_timer's return instance
1934 	 * processing loop isn't active, we can return ri into the pool of
1935 	 * to-be-reused return instances for future uretprobes. If ri_timer()
1936 	 * happens to be running right now, though, we fall back to safety and
1937 	 * just perform RCU-delayed freeing of ri.
1938 	 */
1939 	if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) {
1940 		/* immediate reuse of ri without RCU GP is OK */
1941 		ri_pool_push(utask, ri);
1942 	} else {
1943 		/* we might be racing with ri_timer(), so play it safe */
1944 		ri_free(ri);
1945 	}
1946 }
1947 
1948 /*
1949  * Called with no locks held.
1950  * Called in context of an exiting or an exec-ing thread.
1951  */
1952 void uprobe_free_utask(struct task_struct *t)
1953 {
1954 	struct uprobe_task *utask = t->utask;
1955 	struct return_instance *ri, *ri_next;
1956 
1957 	if (!utask)
1958 		return;
1959 
1960 	t->utask = NULL;
1961 	WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr);
1962 
1963 	timer_delete_sync(&utask->ri_timer);
1964 
1965 	ri = utask->return_instances;
1966 	while (ri) {
1967 		ri_next = ri->next;
1968 		free_ret_instance(utask, ri, true /* cleanup_hprobe */);
1969 		ri = ri_next;
1970 	}
1971 
1972 	/* free_ret_instance() above might add to ri_pool, so this loop should come last */
1973 	ri = utask->ri_pool;
1974 	while (ri) {
1975 		ri_next = ri->next;
1976 		ri_free(ri);
1977 		ri = ri_next;
1978 	}
1979 
1980 	kfree(utask);
1981 }
1982 
1983 #define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */
1984 
1985 #define for_each_ret_instance_rcu(pos, head) \
1986 	for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next))
1987 
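/*
 * Periodic timer callback: walk the task's pending return_instances and
 * let hprobe_expire() convert any SRCU-leased uprobe reference into a
 * proper refcount (or NULL), so that a uretprobed function which stays
 * in user space for a long time doesn't keep the uretprobes_srcu read
 * section open indefinitely. The seqcount write section lets
 * free_ret_instance() detect that this loop is running.
 */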
1988 static void ri_timer(struct timer_list *timer)
1989 {
1990 	struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer);
1991 	struct return_instance *ri;
1992 
1993 	/* SRCU protects uprobe from reuse for the cmpxchg() inside hprobe_expire(). */
1994 	guard(srcu)(&uretprobes_srcu);
1995 	/* RCU protects return_instance from freeing. */
1996 	guard(rcu)();
1997 
1998 	write_seqcount_begin(&utask->ri_seqcount);
1999 
2000 	for_each_ret_instance_rcu(ri, utask->return_instances)
2001 		hprobe_expire(&ri->hprobe, false);
2002 
2003 	write_seqcount_end(&utask->ri_seqcount);
2004 }
2005 
2006 static struct uprobe_task *alloc_utask(void)
2007 {
2008 	struct uprobe_task *utask;
2009 
2010 	utask = kzalloc(sizeof(*utask), GFP_KERNEL);
2011 	if (!utask)
2012 		return NULL;
2013 
2014 	timer_setup(&utask->ri_timer, ri_timer, 0);
2015 	seqcount_init(&utask->ri_seqcount);
2016 
2017 	return utask;
2018 }
2019 
2020 /*
2021  * Allocate a uprobe_task object for the task if necessary.
2022  * Called when the thread hits a breakpoint.
2023  *
2024  * Returns:
2025  * - pointer to new uprobe_task on success
2026  * - NULL otherwise
2027  */
2028 static struct uprobe_task *get_utask(void)
2029 {
2030 	if (!current->utask)
2031 		current->utask = alloc_utask();
2032 	return current->utask;
2033 }
2034 
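/*
 * Pop a cached return_instance if one is available, otherwise allocate
 * a fresh one. On allocation failure this returns ZERO_SIZE_PTR rather
 * than NULL, so that callers can blindly chain push_consumer() calls
 * and do a single ZERO_OR_NULL_PTR() check at the end (see
 * handler_chain()).
 */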
2035 static struct return_instance *alloc_return_instance(struct uprobe_task *utask)
2036 {
2037 	struct return_instance *ri;
2038 
2039 	ri = ri_pool_pop(utask);
2040 	if (ri)
2041 		return ri;
2042 
2043 	ri = kzalloc(sizeof(*ri), GFP_KERNEL);
2044 	if (!ri)
2045 		return ZERO_SIZE_PTR;
2046 
2047 	return ri;
2048 }
2049 
2050 static struct return_instance *dup_return_instance(struct return_instance *old)
2051 {
2052 	struct return_instance *ri;
2053 
2054 	ri = kmemdup(old, sizeof(*ri), GFP_KERNEL);
2055 	if (!ri)
2056 		return NULL;
2057 
2058 	if (unlikely(old->cons_cnt > 1)) {
2059 		ri->extra_consumers = kmemdup(old->extra_consumers,
2060 					      sizeof(ri->extra_consumers[0]) * (old->cons_cnt - 1),
2061 					      GFP_KERNEL);
2062 		if (!ri->extra_consumers) {
2063 			kfree(ri);
2064 			return NULL;
2065 		}
2066 	}
2067 
2068 	return ri;
2069 }
2070 
2071 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
2072 {
2073 	struct uprobe_task *n_utask;
2074 	struct return_instance **p, *o, *n;
2075 	struct uprobe *uprobe;
2076 
2077 	n_utask = alloc_utask();
2078 	if (!n_utask)
2079 		return -ENOMEM;
2080 	t->utask = n_utask;
2081 
2082 	/* protect uprobes from freeing; we'll need to try_get_uprobe() them */
2083 	guard(srcu)(&uretprobes_srcu);
2084 
2085 	p = &n_utask->return_instances;
2086 	for (o = o_utask->return_instances; o; o = o->next) {
2087 		n = dup_return_instance(o);
2088 		if (!n)
2089 			return -ENOMEM;
2090 
2091 		/* if uprobe is non-NULL, we'll have an extra refcount for uprobe */
2092 		uprobe = hprobe_expire(&o->hprobe, true);
2093 
2094 		/*
2095 		 * The new utask will have a stable, properly refcounted uprobe
2096 		 * or NULL. Even if we failed to get a refcounted uprobe, we still
2097 		 * need to preserve the full set of return_instances for proper
2098 		 * uretprobe handling and nesting in the forked task.
2099 		 */
2100 		hprobe_init_stable(&n->hprobe, uprobe);
2101 
2102 		n->next = NULL;
2103 		rcu_assign_pointer(*p, n);
2104 		p = &n->next;
2105 
2106 		n_utask->depth++;
2107 	}
2108 
2109 	return 0;
2110 }
2111 
2112 static void dup_xol_work(struct callback_head *work)
2113 {
2114 	if (current->flags & PF_EXITING)
2115 		return;
2116 
2117 	if (!__create_xol_area(current->utask->dup_xol_addr) &&
2118 			!fatal_signal_pending(current))
2119 		uprobe_warn(current, "dup xol area");
2120 }
2121 
2122 /*
2123  * Called in context of a new clone/fork from copy_process.
2124  */
2125 void uprobe_copy_process(struct task_struct *t, unsigned long flags)
2126 {
2127 	struct uprobe_task *utask = current->utask;
2128 	struct mm_struct *mm = current->mm;
2129 	struct xol_area *area;
2130 
2131 	t->utask = NULL;
2132 
2133 	if (!utask || !utask->return_instances)
2134 		return;
2135 
2136 	if (mm == t->mm && !(flags & CLONE_VFORK))
2137 		return;
2138 
2139 	if (dup_utask(t, utask))
2140 		return uprobe_warn(t, "dup ret instances");
2141 
2142 	/* The task can fork() after dup_xol_work() fails */
2143 	area = mm->uprobes_state.xol_area;
2144 	if (!area)
2145 		return uprobe_warn(t, "dup xol area");
2146 
2147 	if (mm == t->mm)
2148 		return;
2149 
2150 	t->utask->dup_xol_addr = area->vaddr;
2151 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
2152 	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
2153 }
2154 
2155 /*
2156  * The current area->vaddr notion assumes the trampoline address is
2157  * always equal to area->vaddr.
2158  *
2159  * Returns -1 in case the xol_area is not allocated.
2160  */
2161 unsigned long uprobe_get_trampoline_vaddr(void)
2162 {
2163 	struct xol_area *area;
2164 	unsigned long trampoline_vaddr = -1;
2165 
2166 	/* Pairs with xol_add_vma() smp_store_release() */
2167 	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
2168 	if (area)
2169 		trampoline_vaddr = area->vaddr;
2170 
2171 	return trampoline_vaddr;
2172 }
2173 
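/*
 * Drop return_instances whose frames were invalidated, e.g. by
 * longjmp(): starting from the innermost pending instance, free entries
 * until arch_uretprobe_is_alive() reports a live frame. @chained picks
 * RP_CHECK_CHAIN_CALL vs RP_CHECK_CALL so the arch helper knows whether
 * the instance being prepared shares its frame with the previous one.
 */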
2174 static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
2175 					struct pt_regs *regs)
2176 {
2177 	struct return_instance *ri = utask->return_instances, *ri_next;
2178 	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
2179 
2180 	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
2181 		ri_next = ri->next;
2182 		rcu_assign_pointer(utask->return_instances, ri_next);
2183 		utask->depth--;
2184 
2185 		free_ret_instance(utask, ri, true /* cleanup_hprobe */);
2186 		ri = ri_next;
2187 	}
2188 }
2189 
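/*
 * Arm a uretprobe for the current function invocation: hijack the
 * return address in @regs so the task returns through the uretprobe
 * trampoline, and push @ri, which records the original return address,
 * onto utask->return_instances. On any failure @ri is freed and the
 * call proceeds unprobed.
 */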
2190 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
2191 			      struct return_instance *ri)
2192 {
2193 	struct uprobe_task *utask = current->utask;
2194 	unsigned long orig_ret_vaddr, trampoline_vaddr;
2195 	bool chained;
2196 	int srcu_idx;
2197 
2198 	if (!get_xol_area())
2199 		goto free;
2200 
2201 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
2202 		printk_ratelimited(KERN_INFO
2203 				"uprobe: omit uretprobe due to nesting limit pid/tgid=%d/%d\n",
2204 				current->pid, current->tgid);
2205 		goto free;
2206 	}
2207 
2208 	trampoline_vaddr = uprobe_get_trampoline_vaddr();
2209 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
2210 	if (orig_ret_vaddr == -1)
2211 		goto free;
2212 
2213 	/* drop the entries invalidated by longjmp() */
2214 	chained = (orig_ret_vaddr == trampoline_vaddr);
2215 	cleanup_return_instances(utask, chained, regs);
2216 
2217 	/*
2218 	 * We don't want to keep the trampoline address on the stack; rather,
2219 	 * keep the original return address of the first caller through all
2220 	 * the subsequent instances. This also makes breakpoint unwrapping easier.
2221 	 */
2222 	if (chained) {
2223 		if (!utask->return_instances) {
2224 			/*
2225 			 * This situation is not possible. Likely we have an
2226 			 * attack from user-space.
2227 			 */
2228 			uprobe_warn(current, "handle tail call");
2229 			goto free;
2230 		}
2231 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
2232 	}
2233 
2234 	/* __srcu_read_lock() because SRCU lock survives switch to user space */
2235 	srcu_idx = __srcu_read_lock(&uretprobes_srcu);
2236 
2237 	ri->func = instruction_pointer(regs);
2238 	ri->stack = user_stack_pointer(regs);
2239 	ri->orig_ret_vaddr = orig_ret_vaddr;
2240 	ri->chained = chained;
2241 
2242 	utask->depth++;
2243 
2244 	hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx);
2245 	ri->next = utask->return_instances;
2246 	rcu_assign_pointer(utask->return_instances, ri);
2247 
2248 	mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD);
2249 
2250 	return;
2251 free:
2252 	ri_free(ri);
2253 }
2254 
2255 /* Prepare to single-step probed instruction out of line. */
2256 static int
2257 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
2258 {
2259 	struct uprobe_task *utask = current->utask;
2260 	int err;
2261 
2262 	if (!try_get_uprobe(uprobe))
2263 		return -EINVAL;
2264 
2265 	if (!xol_get_insn_slot(uprobe, utask)) {
2266 		err = -ENOMEM;
2267 		goto err_out;
2268 	}
2269 
2270 	utask->vaddr = bp_vaddr;
2271 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
2272 	if (unlikely(err)) {
2273 		xol_free_insn_slot(utask);
2274 		goto err_out;
2275 	}
2276 
2277 	utask->active_uprobe = uprobe;
2278 	utask->state = UTASK_SSTEP;
2279 	return 0;
2280 err_out:
2281 	put_uprobe(uprobe);
2282 	return err;
2283 }
2284 
2285 /*
2286  * If we are singlestepping, then ensure this thread is not connected to
2287  * non-fatal signals until completion of singlestep.  When the xol insn
2288  * itself triggers the signal, restart the original insn even if the task
2289  * is already SIGKILL'ed (since coredump should report the correct ip).
2290  * This is even more important if the task has a handler for SIGSEGV/etc.:
2291  * the _same_ instruction should be repeated again after return from the
2292  * signal handler, and SSTEP can never finish in this case.
2293  */
2294 bool uprobe_deny_signal(void)
2295 {
2296 	struct task_struct *t = current;
2297 	struct uprobe_task *utask = t->utask;
2298 
2299 	if (likely(!utask || !utask->active_uprobe))
2300 		return false;
2301 
2302 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
2303 
2304 	if (task_sigpending(t)) {
2305 		spin_lock_irq(&t->sighand->siglock);
2306 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
2307 		spin_unlock_irq(&t->sighand->siglock);
2308 
2309 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
2310 			utask->state = UTASK_SSTEP_TRAPPED;
2311 			set_tsk_thread_flag(t, TIF_UPROBE);
2312 		}
2313 	}
2314 
2315 	return true;
2316 }
2317 
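/*
 * Re-check whether this mm still has any VMA with breakpoints installed
 * and clear MMF_HAS_UPROBES if not, so subsequent traps can take the
 * fast path. Runs lazily when MMF_RECALC_UPROBES is set (e.g. by
 * uprobe_dup_mmap()).
 */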
2318 static void mmf_recalc_uprobes(struct mm_struct *mm)
2319 {
2320 	VMA_ITERATOR(vmi, mm, 0);
2321 	struct vm_area_struct *vma;
2322 
2323 	for_each_vma(vmi, vma) {
2324 		if (!valid_vma(vma, false))
2325 			continue;
2326 		/*
2327 		 * This is not strictly accurate; we can race with
2328 		 * uprobe_unregister() and see the already removed
2329 		 * uprobe if delete_uprobe() was not yet called.
2330 		 * Or this uprobe can be filtered out.
2331 		 */
2332 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
2333 			return;
2334 	}
2335 
2336 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
2337 }
2338 
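/*
 * Check whether the instruction at @vaddr is (a variant of) the trap
 * instruction: try a cheap pagefault-disabled __get_user() first and
 * fall back to get_user_pages() only on a fault. Returns the result of
 * is_trap_insn(), or a negative errno if the memory can't be read.
 */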
2339 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
2340 {
2341 	struct page *page;
2342 	uprobe_opcode_t opcode;
2343 	int result;
2344 
2345 	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
2346 		return -EINVAL;
2347 
2348 	pagefault_disable();
2349 	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
2350 	pagefault_enable();
2351 
2352 	if (likely(result == 0))
2353 		goto out;
2354 
2355 	result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
2356 	if (result < 0)
2357 		return result;
2358 
2359 	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2360 	put_page(page);
2361  out:
2362 	/* This needs to return true for any variant of the trap insn */
2363 	return is_trap_insn(&opcode);
2364 }
2365 
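/*
 * Lockless fast path for the breakpoint handler: look up the uprobe by
 * (inode, offset) under RCU while speculating that no mmap_lock writer
 * is active. Any concurrent modification of the mm invalidates the
 * sequence count and we return NULL, so the caller falls back to the
 * mmap_read_lock()-protected lookup.
 */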
2366 static struct uprobe *find_active_uprobe_speculative(unsigned long bp_vaddr)
2367 {
2368 	struct mm_struct *mm = current->mm;
2369 	struct uprobe *uprobe = NULL;
2370 	struct vm_area_struct *vma;
2371 	struct file *vm_file;
2372 	loff_t offset;
2373 	unsigned int seq;
2374 
2375 	guard(rcu)();
2376 
2377 	if (!mmap_lock_speculate_try_begin(mm, &seq))
2378 		return NULL;
2379 
2380 	vma = vma_lookup(mm, bp_vaddr);
2381 	if (!vma)
2382 		return NULL;
2383 
2384 	/*
2385 	 * vm_file memory can be reused for another instance of struct file,
2386 	 * but can't be freed from under us, so it's safe to read fields from
2387 	 * it, even if the values are some garbage values; ultimately
2388 	 * find_uprobe_rcu() + mmap_lock_speculate_retry() check will ensure
2389 	 * that whatever we speculatively found is correct.
2390 	 */
2391 	vm_file = READ_ONCE(vma->vm_file);
2392 	if (!vm_file)
2393 		return NULL;
2394 
2395 	offset = (loff_t)(vma->vm_pgoff << PAGE_SHIFT) + (bp_vaddr - vma->vm_start);
2396 	uprobe = find_uprobe_rcu(vm_file->f_inode, offset);
2397 	if (!uprobe)
2398 		return NULL;
2399 
2400 	/* now double check that nothing about MM changed */
2401 	if (mmap_lock_speculate_retry(mm, seq))
2402 		return NULL;
2403 
2404 	return uprobe;
2405 }
2406 
2407 /* assumes being inside RCU protected region */
2408 static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
2409 {
2410 	struct mm_struct *mm = current->mm;
2411 	struct uprobe *uprobe = NULL;
2412 	struct vm_area_struct *vma;
2413 
2414 	uprobe = find_active_uprobe_speculative(bp_vaddr);
2415 	if (uprobe)
2416 		return uprobe;
2417 
2418 	mmap_read_lock(mm);
2419 	vma = vma_lookup(mm, bp_vaddr);
2420 	if (vma) {
2421 		if (vma->vm_file) {
2422 			struct inode *inode = file_inode(vma->vm_file);
2423 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
2424 
2425 			uprobe = find_uprobe_rcu(inode, offset);
2426 		}
2427 
2428 		if (!uprobe)
2429 			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
2430 	} else {
2431 		*is_swbp = -EFAULT;
2432 	}
2433 
2434 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2435 		mmf_recalc_uprobes(mm);
2436 	mmap_read_unlock(mm);
2437 
2438 	return uprobe;
2439 }
2440 
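/*
 * Record a session consumer's (id, cookie) pair in @ri. The first
 * consumer is stored inline in ri->consumer; any further ones go into
 * the krealloc()'ed ri->extra_consumers array. On allocation failure
 * the instance is freed and ZERO_SIZE_PTR is returned so the caller's
 * final ZERO_OR_NULL_PTR() check catches it.
 */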
2441 static struct return_instance *push_consumer(struct return_instance *ri, __u64 id, __u64 cookie)
2442 {
2443 	struct return_consumer *ric;
2444 
2445 	if (unlikely(ri == ZERO_SIZE_PTR))
2446 		return ri;
2447 
2448 	if (unlikely(ri->cons_cnt > 0)) {
2449 		ric = krealloc(ri->extra_consumers, sizeof(*ric) * ri->cons_cnt, GFP_KERNEL);
2450 		if (!ric) {
2451 			ri_free(ri);
2452 			return ZERO_SIZE_PTR;
2453 		}
2454 		ri->extra_consumers = ric;
2455 	}
2456 
2457 	ric = likely(ri->cons_cnt == 0) ? &ri->consumer : &ri->extra_consumers[ri->cons_cnt - 1];
2458 	ric->id = id;
2459 	ric->cookie = cookie;
2460 
2461 	ri->cons_cnt++;
2462 	return ri;
2463 }
2464 
2465 static struct return_consumer *
2466 return_consumer_find(struct return_instance *ri, int *iter, int id)
2467 {
2468 	struct return_consumer *ric;
2469 	int idx;
2470 
2471 	for (idx = *iter; idx < ri->cons_cnt; idx++) {
2473 		ric = likely(idx == 0) ? &ri->consumer : &ri->extra_consumers[idx - 1];
2474 		if (ric->id == id) {
2475 			*iter = idx + 1;
2476 			return ric;
2477 		}
2478 	}
2479 
2480 	return NULL;
2481 }
2482 
2483 static bool ignore_ret_handler(int rc)
2484 {
2485 	return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE;
2486 }
2487 
2488 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2489 {
2490 	struct uprobe_consumer *uc;
2491 	bool has_consumers = false, remove = true;
2492 	struct return_instance *ri = NULL;
2493 	struct uprobe_task *utask = current->utask;
2494 
2495 	utask->auprobe = &uprobe->arch;
2496 
2497 	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
2498 		bool session = uc->handler && uc->ret_handler;
2499 		__u64 cookie = 0;
2500 		int rc = 0;
2501 
2502 		if (uc->handler) {
2503 			rc = uc->handler(uc, regs, &cookie);
2504 			WARN(rc < 0 || rc > 2,
2505 				"bad rc=0x%x from %ps()\n", rc, uc->handler);
2506 		}
2507 
2508 		remove &= rc == UPROBE_HANDLER_REMOVE;
2509 		has_consumers = true;
2510 
2511 		if (!uc->ret_handler || ignore_ret_handler(rc))
2512 			continue;
2513 
2514 		if (!ri)
2515 			ri = alloc_return_instance(utask);
2516 
2517 		if (session)
2518 			ri = push_consumer(ri, uc->id, cookie);
2519 	}
2520 	utask->auprobe = NULL;
2521 
2522 	if (!ZERO_OR_NULL_PTR(ri))
2523 		prepare_uretprobe(uprobe, regs, ri);
2524 
2525 	if (remove && has_consumers) {
2526 		down_read(&uprobe->register_rwsem);
2527 
2528 		/* re-check that removal is still required, this time under lock */
2529 		if (!filter_chain(uprobe, current->mm)) {
2530 			WARN_ON(!uprobe_is_active(uprobe));
2531 			unapply_uprobe(uprobe, current->mm);
2532 		}
2533 
2534 		up_read(&uprobe->register_rwsem);
2535 	}
2536 }
2537 
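/*
 * Hypothetical consumer sketch, for illustration only (callback
 * signatures match the uc->handler/uc->ret_handler call sites above):
 * a "session" consumer supplies both callbacks and can pass a cookie
 * from function entry to function return:
 *
 *	static int my_handler(struct uprobe_consumer *uc,
 *			      struct pt_regs *regs, __u64 *cookie)
 *	{
 *		*cookie = 42;	// stashed via push_consumer()
 *		return 0;	// 0: keep the probe installed
 *	}
 *
 *	static int my_ret_handler(struct uprobe_consumer *uc,
 *				  unsigned long func, struct pt_regs *regs,
 *				  __u64 *cookie)
 *	{
 *		// *cookie == 42, looked up via return_consumer_find()
 *		return 0;
 *	}
 */
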
2538 static void
2539 handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs)
2540 {
2541 	struct return_consumer *ric;
2542 	struct uprobe_consumer *uc;
2543 	int ric_idx = 0;
2544 
2545 	/* all consumers unsubscribed meanwhile */
2546 	if (unlikely(!uprobe))
2547 		return;
2548 
2549 	rcu_read_lock_trace();
2550 	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
2551 		bool session = uc->handler && uc->ret_handler;
2552 
2553 		if (uc->ret_handler) {
2554 			ric = return_consumer_find(ri, &ric_idx, uc->id);
2555 			if (!session || ric)
2556 				uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
2557 		}
2558 	}
2559 	rcu_read_unlock_trace();
2560 }
2561 
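/*
 * Walk past the current run of chained instances (a chained instance
 * shares its return frame with the next one) and return the first
 * instance of the next chain, or NULL if this chain is the outermost
 * one.
 */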
2562 static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2563 {
2564 	bool chained;
2565 
2566 	do {
2567 		chained = ri->chained;
2568 		ri = ri->next;	/* can't be NULL if chained */
2569 	} while (chained);
2570 
2571 	return ri;
2572 }
2573 
2574 void uprobe_handle_trampoline(struct pt_regs *regs)
2575 {
2576 	struct uprobe_task *utask;
2577 	struct return_instance *ri, *ri_next, *next_chain;
2578 	struct uprobe *uprobe;
2579 	enum hprobe_state hstate;
2580 	bool valid;
2581 
2582 	utask = current->utask;
2583 	if (!utask)
2584 		goto sigill;
2585 
2586 	ri = utask->return_instances;
2587 	if (!ri)
2588 		goto sigill;
2589 
2590 	do {
2591 		/*
2592 		 * We should throw out the frames invalidated by longjmp().
2593 		 * If this chain is valid, then the next one should be alive
2594 		 * or NULL; the latter case means that nobody but ri->func
2595 		 * could hit this trampoline on return. TODO: sigaltstack().
2596 		 */
2597 		next_chain = find_next_ret_chain(ri);
2598 		valid = !next_chain || arch_uretprobe_is_alive(next_chain, RP_CHECK_RET, regs);
2599 
2600 		instruction_pointer_set(regs, ri->orig_ret_vaddr);
2601 		do {
2602 			/* pop current instance from the stack of pending return instances,
2603 			 * as it's not pending anymore: we just fixed up original
2604 			 * instruction pointer in regs and are about to call handlers;
2605 			 * this allows fixup_uretprobe_trampoline_entries() to properly fix up
2606 			 * captured stack traces from uretprobe handlers, in which pending
2607 			 * trampoline addresses on the stack are replaced with correct
2608 			 * original return addresses
2609 			 */
2610 			ri_next = ri->next;
2611 			rcu_assign_pointer(utask->return_instances, ri_next);
2612 			utask->depth--;
2613 
2614 			uprobe = hprobe_consume(&ri->hprobe, &hstate);
2615 			if (valid)
2616 				handle_uretprobe_chain(ri, uprobe, regs);
2617 			hprobe_finalize(&ri->hprobe, hstate);
2618 
2619 			/* We already took care of hprobe, no need to waste more time on that. */
2620 			free_ret_instance(utask, ri, false /* !cleanup_hprobe */);
2621 			ri = ri_next;
2622 		} while (ri != next_chain);
2623 	} while (!valid);
2624 
2625 	return;
2626 
2627 sigill:
2628 	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
2629 	force_sig(SIGILL);
2630 }
2631 
2632 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
2633 {
2634 	return false;
2635 }
2636 
2637 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
2638 					struct pt_regs *regs)
2639 {
2640 	return true;
2641 }
2642 
2643 /*
2644  * Run handler and ask thread to singlestep.
2645  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
2646  */
2647 static void handle_swbp(struct pt_regs *regs)
2648 {
2649 	struct uprobe *uprobe;
2650 	unsigned long bp_vaddr;
2651 	int is_swbp;
2652 
2653 	bp_vaddr = uprobe_get_swbp_addr(regs);
2654 	if (bp_vaddr == uprobe_get_trampoline_vaddr())
2655 		return uprobe_handle_trampoline(regs);
2656 
2657 	rcu_read_lock_trace();
2658 
2659 	uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
2660 	if (!uprobe) {
2661 		if (is_swbp > 0) {
2662 			/* No matching uprobe; signal SIGTRAP. */
2663 			force_sig(SIGTRAP);
2664 		} else {
2665 			/*
2666 			 * Either we raced with uprobe_unregister() or we can't
2667 			 * access this memory. The latter is only possible if
2668 			 * another thread plays with our ->mm. In both cases
2669 			 * we can simply restart. If this vma was unmapped we
2670 			 * can pretend this insn was not executed yet and get
2671 			 * the (correct) SIGSEGV after restart.
2672 			 */
2673 			instruction_pointer_set(regs, bp_vaddr);
2674 		}
2675 		goto out;
2676 	}
2677 
2678 	/* change it in advance for ->handler() and restart */
2679 	instruction_pointer_set(regs, bp_vaddr);
2680 
2681 	/*
2682 	 * TODO: move copy_insn/etc into _register and remove this hack.
2683 	 * After we hit the bp, _unregister + _register can install the
2684 	 * new and not-yet-analyzed uprobe at the same address, restart.
2685 	 */
2686 	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
2687 		goto out;
2688 
2689 	/*
2690 	 * Pairs with the smp_wmb() in prepare_uprobe().
2691 	 *
2692 	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
2693 	 * we must also see the stores to &uprobe->arch performed by the
2694 	 * prepare_uprobe() call.
2695 	 */
2696 	smp_rmb();
2697 
2698 	/* Tracing handlers use ->utask to communicate with fetch methods */
2699 	if (!get_utask())
2700 		goto out;
2701 
2702 	if (arch_uprobe_ignore(&uprobe->arch, regs))
2703 		goto out;
2704 
2705 	handler_chain(uprobe, regs);
2706 
2707 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
2708 		goto out;
2709 
2710 	if (pre_ssout(uprobe, regs, bp_vaddr))
2711 		goto out;
2712 
2713 out:
2714 	/* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
2715 	rcu_read_unlock_trace();
2716 }
2717 
2718 /*
2719  * Perform required fix-ups and disable singlestep.
2720  * Allow pending signals to take effect.
2721  */
2722 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
2723 {
2724 	struct uprobe *uprobe;
2725 	int err = 0;
2726 
2727 	uprobe = utask->active_uprobe;
2728 	if (utask->state == UTASK_SSTEP_ACK)
2729 		err = arch_uprobe_post_xol(&uprobe->arch, regs);
2730 	else if (utask->state == UTASK_SSTEP_TRAPPED)
2731 		arch_uprobe_abort_xol(&uprobe->arch, regs);
2732 	else
2733 		WARN_ON_ONCE(1);
2734 
2735 	put_uprobe(uprobe);
2736 	utask->active_uprobe = NULL;
2737 	utask->state = UTASK_RUNNING;
2738 	xol_free_insn_slot(utask);
2739 
2740 	spin_lock_irq(&current->sighand->siglock);
2741 	recalc_sigpending(); /* see uprobe_deny_signal() */
2742 	spin_unlock_irq(&current->sighand->siglock);
2743 
2744 	if (unlikely(err)) {
2745 		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
2746 		force_sig(SIGILL);
2747 	}
2748 }
2749 
2750 /*
2751  * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
2752  * allows the thread to return from interrupt. After that handle_swbp()
2753  * sets utask->active_uprobe.
2754  *
2755  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
2756  * and allows the thread to return from interrupt.
2757  *
2758  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
2759  * uprobe_notify_resume().
2760  */
2761 void uprobe_notify_resume(struct pt_regs *regs)
2762 {
2763 	struct uprobe_task *utask;
2764 
2765 	clear_thread_flag(TIF_UPROBE);
2766 
2767 	utask = current->utask;
2768 	if (utask && utask->active_uprobe)
2769 		handle_singlestep(utask, regs);
2770 	else
2771 		handle_swbp(regs);
2772 }
2773 
2774 /*
2775  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
2776  * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
2777  */
2778 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
2779 {
2780 	if (!current->mm)
2781 		return 0;
2782 
2783 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
2784 	    (!current->utask || !current->utask->return_instances))
2785 		return 0;
2786 
2787 	set_thread_flag(TIF_UPROBE);
2788 	return 1;
2789 }
2790 
2791 /*
2792  * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
2793  * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
2794  */
2795 int uprobe_post_sstep_notifier(struct pt_regs *regs)
2796 {
2797 	struct uprobe_task *utask = current->utask;
2798 
2799 	if (!current->mm || !utask || !utask->active_uprobe)
2800 		/* task is currently not uprobed */
2801 		return 0;
2802 
2803 	utask->state = UTASK_SSTEP_ACK;
2804 	set_thread_flag(TIF_UPROBE);
2805 	return 1;
2806 }
2807 
2808 static struct notifier_block uprobe_exception_nb = {
2809 	.notifier_call		= arch_uprobe_exception_notify,
2810 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
2811 };
2812 
2813 void __init uprobes_init(void)
2814 {
2815 	int i;
2816 
2817 	for (i = 0; i < UPROBES_HASH_SZ; i++)
2818 		mutex_init(&uprobes_mmap_mutex[i]);
2819 
2820 	BUG_ON(register_die_notifier(&uprobe_exception_nb));
2821 }
2822