// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>
#include <linux/rcupdate_trace.h>
#include <linux/workqueue.h>
#include <linux/srcu.h>
#include <linux/oom.h>		/* check_stable_address_space */

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
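
/*
 * Worked example (illustrative, arch-dependent): with 4K pages and the
 * 128-byte XOL slots used on x86, UINSNS_PER_PAGE is 4096/128 = 32, so a
 * single XOL page provides 32 out-of-line execution slots per mm.
 */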

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time. Perhaps a fine-grained per-inode count would be better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Covers return_instance's uprobe lifetime. */
DEFINE_STATIC_SRCU(uretprobes_srcu);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct list_head	consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	union {
		struct rcu_head		rcu;
		struct work_struct	work;
	};
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;		/* "unsigned long" so bitops work */

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 * insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 * ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, a thread contends for a slot and frees the
 * slot after single-stepping. Currently a fixed number of slots is
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct page			*page;
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself. The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
}

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
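
/*
 * Worked example (illustrative numbers): for a vma with vm_start = 0x400000
 * and vm_pgoff = 1 (i.e. the mapping starts at file offset 0x1000 with 4K
 * pages), offset_to_vaddr(vma, 0x1234) = 0x400000 + 0x1234 - 0x1000 =
 * 0x400234, and vaddr_to_offset(vma, 0x400234) = 0x1234 again.
 */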

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address @old_page is mapped at
 * @old_page: the page we are replacing by @new_page
 * @new_page: the modified page replacing @old_page
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;
	pte_t pte;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
	pte = ptep_get(pvmw.pte);

	/*
	 * Handle PFN swap PTEs, such as device-exclusive ones, that actually
	 * map pages: simply trigger GUP again to fix it up.
	 */
	if (unlikely(!pte_present(pte))) {
		page_vma_mapped_walk_done(&pvmw);
		goto unlock;
	}

	if (new_page) {
		folio_get(new_folio);
		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_folio));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(pte));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at(mm, addr, pvmw.pte,
			   mk_pte(new_page, vma->vm_page_prot));

	folio_remove_rmap_pte(old_folio, old_page, vma);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}
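
/*
 * For example (illustrative): on x86 UPROBE_SWBP_INSN is the single-byte
 * int3 opcode 0xcc, so this default check compares just one byte.
 * Architectures with different or multiple trap encodings override
 * is_swbp_insn()/is_trap_insn().
 */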

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

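/*
 * verify_opcode() return values (summarized here for clarity, derived from
 * the checks below): 1 means the caller should go ahead and write
 * @new_opcode; 0 means there is nothing to do (registering an
 * already-installed breakpoint, or unregistering an instruction we never
 * changed).
 */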
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be a conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0; in that case we have to return an error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%p\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}
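
/*
 * Background (illustrative): the reference counter typically backs an SDT
 * semaphore, a 16-bit counter in the probed binary that user code can test
 * to skip argument setup when no probe is attached, e.g.:
 *
 *	extern unsigned short my_probe_semaphore;	// hypothetical symbol
 *	if (my_probe_semaphore > 0)
 *		prepare_and_fire_probe_args();
 *
 * update_ref_ctr(..., +1/-1) is what makes such a test become true/false.
 */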

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable-length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify is_trap_at_addr() and
 * uprobe_write_opcode() accordingly. This would never be a problem for archs
 * that have fixed-length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for read or write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
	if (IS_ERR(old_page))
		return PTR_ERR(old_page);

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (is_zero_page(old_page)) {
		ret = -EINVAL;
		goto put_old;
	}

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace the instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go of new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert the reference counter if the instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

/* uprobe should have guaranteed positive refcount */
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

/*
 * uprobe should have guaranteed lifetime, which can be either of:
 *   - caller already has refcount taken (and wants an extra one);
 *   - uprobe is RCU protected and won't be freed until after grace period;
 *   - we are holding uprobes_treelock (for read or write, doesn't matter).
 */
static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
{
	if (refcount_inc_not_zero(&uprobe->ref))
		return uprobe;
	return NULL;
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	kfree(uprobe);
}

static void uprobe_free_srcu(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace);
}

static void uprobe_free_deferred(struct work_struct *work)
{
	struct uprobe *uprobe = container_of(work, struct uprobe, work);

	write_lock(&uprobes_treelock);

	if (uprobe_is_active(uprobe)) {
		write_seqcount_begin(&uprobes_seqcount);
		rb_erase(&uprobe->rb_node, &uprobes_tree);
		write_seqcount_end(&uprobes_seqcount);
	}

	write_unlock(&uprobes_treelock);

	/*
	 * If the application munmap()s the exec vma before
	 * uprobe_unregister() gets called, we don't get a chance to remove
	 * the uprobe from delayed_uprobe_list in remove_breakpoint(). Do it
	 * here.
	 */
	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(uprobe, NULL);
	mutex_unlock(&delayed_uprobe_lock);

	/* start srcu -> rcu_tasks_trace -> kfree chain */
	call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu);
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (!refcount_dec_and_test(&uprobe->ref))
		return;

	INIT_WORK(&uprobe->work, uprobe_free_deferred);
	schedule_work(&uprobe->work);
}

/* Initialize hprobe as SRCU-protected "leased" uprobe */
static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx)
{
	WARN_ON(!uprobe);
	hprobe->state = HPROBE_LEASED;
	hprobe->uprobe = uprobe;
	hprobe->srcu_idx = srcu_idx;
}

/* Initialize hprobe as refcounted ("stable") uprobe (uprobe can be NULL). */
static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe)
{
	hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE;
	hprobe->uprobe = uprobe;
	hprobe->srcu_idx = -1;
}

/*
 * hprobe_consume() fetches hprobe's underlying uprobe and detects whether
 * uprobe is SRCU protected or is refcounted. hprobe_consume() can be
 * used only once for a given hprobe.
 *
 * Caller has to call hprobe_finalize() and pass previous hprobe_state, so
 * that hprobe_finalize() can perform SRCU unlock or put uprobe, whichever
 * is appropriate.
 */
static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate)
{
	*hstate = xchg(&hprobe->state, HPROBE_CONSUMED);
	switch (*hstate) {
	case HPROBE_LEASED:
	case HPROBE_STABLE:
		return hprobe->uprobe;
	case HPROBE_GONE:	/* uprobe is NULL, no SRCU */
	case HPROBE_CONSUMED:	/* uprobe was finalized already, do nothing */
		return NULL;
	default:
		WARN(1, "hprobe invalid state %d", *hstate);
		return NULL;
	}
}

/*
 * Reset hprobe state and, if hprobe was LEASED, release SRCU lock.
 * hprobe_finalize() can only be used from current context after
 * hprobe_consume() call (which determines uprobe and hstate value).
 */
static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate)
{
	switch (hstate) {
	case HPROBE_LEASED:
		__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
		break;
	case HPROBE_STABLE:
		put_uprobe(hprobe->uprobe);
		break;
	case HPROBE_GONE:
	case HPROBE_CONSUMED:
		break;
	default:
		WARN(1, "hprobe invalid state %d", hstate);
		break;
	}
}
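
/*
 * Typical consume/finalize pairing (illustrative sketch of the contract
 * documented above, not a verbatim caller):
 *
 *	enum hprobe_state hstate;
 *	struct uprobe *uprobe;
 *
 *	uprobe = hprobe_consume(hprobe, &hstate);
 *	if (uprobe)
 *		handle_it(uprobe);		// hypothetical work
 *	hprobe_finalize(hprobe, hstate);	// SRCU unlock or refcount put
 */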

/*
 * Attempt to switch (atomically) uprobe from being SRCU protected (LEASED)
 * to refcounted (STABLE) state. Competes with hprobe_consume(); only one of
 * them can win the race to perform SRCU unlocking. Whoever wins must perform
 * SRCU unlock.
 *
 * Returns underlying valid uprobe or NULL, if there was no underlying uprobe
 * to begin with or we failed to bump its refcount and it's going away.
 *
 * Returned non-NULL uprobe can be still safely used within an ongoing SRCU
 * locked region. If `get` is true, it's guaranteed that non-NULL uprobe has
 * an extra refcount for caller to assume and use. Otherwise, it's not
 * guaranteed that returned uprobe has a positive refcount, so caller has to
 * attempt try_get_uprobe(), if it needs to preserve uprobe beyond current
 * SRCU lock region. See dup_utask().
 */
static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
{
	enum hprobe_state hstate;

	/*
	 * Caller should guarantee that return_instance is not going to be
	 * freed from under us. This can be achieved either through holding
	 * rcu_read_lock() or by owning return_instance in the first place.
	 *
	 * Underlying uprobe is itself protected from reuse by SRCU, so ensure
	 * SRCU lock is held properly.
	 */
	lockdep_assert(srcu_read_lock_held(&uretprobes_srcu));

	hstate = READ_ONCE(hprobe->state);
	switch (hstate) {
	case HPROBE_STABLE:
		/* uprobe has positive refcount, bump refcount, if necessary */
		return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe;
	case HPROBE_GONE:
		/*
		 * SRCU was unlocked earlier and we didn't manage to take
		 * uprobe refcnt, so it's effectively NULL
		 */
		return NULL;
	case HPROBE_CONSUMED:
		/*
		 * uprobe was consumed, so it's effectively NULL as far as
		 * uretprobe processing logic is concerned
		 */
		return NULL;
	case HPROBE_LEASED: {
		struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe);
		/*
		 * Try to switch hprobe state, guarding against
		 * hprobe_consume() or another hprobe_expire() racing with us.
		 * Note, if we failed to get uprobe refcount, we use special
		 * HPROBE_GONE state to signal that hprobe->uprobe shouldn't
		 * be used as it will be freed after SRCU is unlocked.
		 */
		if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) {
			/* We won the race, we are the ones to unlock SRCU */
			__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
			return get ? get_uprobe(uprobe) : uprobe;
		}

		/*
		 * We lost the race, undo refcount bump (if it ever happened),
		 * unless caller would like an extra refcount anyways.
		 */
		if (uprobe && !get)
			put_uprobe(uprobe);
		/*
		 * Even if hprobe_consume() or another hprobe_expire() wins
		 * the state update race and unlocks SRCU from under us, we
		 * still have a guarantee that the underlying uprobe won't be
		 * freed due to ongoing caller's SRCU lock region, so we can
		 * return it regardless. Also, if `get` was true, we also have
		 * an extra ref for the caller to own. This is used in dup_utask().
		 */
		return uprobe;
	}
	default:
		WARN(1, "unknown hprobe state %d", hstate);
		return NULL;
	}
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

/*
 * Assumes being inside RCU protected region.
 * No refcount is taken on returned uprobe.
 */
static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node;
	unsigned int seq;

	lockdep_assert(rcu_read_lock_trace_held());

	do {
		seq = read_seqcount_begin(&uprobes_seqcount);
		node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
		/*
		 * Lockless RB-tree lookups can result only in false negatives.
		 * If the element is found, it is correct and can be returned
		 * under RCU protection. If we find nothing, we need to
		 * validate that seqcount didn't change. If it did, we have to
		 * try again as we might have missed the element (false
		 * negative). If seqcount is unchanged, search truly failed.
		 */
		if (node)
			return __node_2_uprobe(node);
	} while (read_seqcount_retry(&uprobes_seqcount, seq));

	return NULL;
}
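
/*
 * Caller-side pattern (illustrative): the lookup itself takes no reference,
 * so the caller must already be inside an RCU-tasks-trace read section and
 * pin the result if it must outlive that section:
 *
 *	rcu_read_lock_trace();
 *	uprobe = find_uprobe_rcu(inode, offset);
 *	if (uprobe && need_to_keep)		// hypothetical condition
 *		uprobe = try_get_uprobe(uprobe);
 *	rcu_read_unlock_trace();
 */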

/*
 * Attempt to insert a new uprobe into uprobes_tree.
 *
 * If uprobe already exists (for given inode+offset), we just increment
 * refcount of previously existing uprobe.
 *
 * If not, a provided new instance of uprobe is inserted into the tree (with
 * assumed initial refcount == 1).
 *
 * In any case, we return a uprobe instance that ends up being in uprobes_tree.
 * Caller has to clean up new uprobe instance, if it ended up not being
 * inserted into the tree.
 *
 * We assume that uprobes_treelock is held for writing.
 */
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;
again:
	node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node) {
		struct uprobe *u = __node_2_uprobe(node);

		if (!try_get_uprobe(u)) {
			rb_erase(node, &uprobes_tree);
			RB_CLEAR_NODE(&u->rb_node);
			goto again;
		}

		return u;
	}

	return uprobe;
}

/*
 * Acquire uprobes_treelock and insert uprobe into uprobes_tree
 * (or reuse existing one, see __insert_uprobe() comments above).
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	write_lock(&uprobes_treelock);
	write_seqcount_begin(&uprobes_seqcount);
	u = __insert_uprobe(uprobe);
	write_seqcount_end(&uprobes_seqcount);
	write_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return ERR_PTR(-ENOMEM);

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	INIT_LIST_HEAD(&uprobe->consumers);
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);
	RB_CLEAR_NODE(&uprobe->rb_node);
	refcount_set(&uprobe->ref, 1);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe != uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	static atomic64_t id;

	down_write(&uprobe->consumer_rwsem);
	list_add_rcu(&uc->cons_node, &uprobe->consumers);
	uc->id = (__u64) atomic64_inc_return(&id);
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Should never be called with consumer that's not part of @uprobe->consumers.
 */
static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_del_rcu(&uc->cons_node);
	up_write(&uprobe->consumer_rwsem);
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}
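
/*
 * Worked example (illustrative numbers): copying a 16-byte instruction
 * buffer that starts 8 bytes before a 4K page boundary, say offs = 0xff8.
 * The first iteration has len = min(16, 0x1000 - 0xff8) = 8 bytes; the
 * second picks up the remaining 8 bytes from the next page, so the copy
 * never crosses a page boundary within one __copy_insn() call.
 */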

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, mm);
}

static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		ret = consumer_filter(uc, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;
		/*
		 * We take mmap_lock for writing to avoid the race with
		 * find_active_uprobe_rcu() which takes mmap_lock for reading.
		 * Thus this install_breakpoint() can not make
		 * is_trap_at_addr() true right after find_uprobe_rcu()
		 * returns NULL in find_active_uprobe_rcu().
		 */
		mmap_write_lock(mm);
		if (check_stable_address_space(mm))
			goto unlock;

		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

unlock:
		mmap_write_unlock(mm);
free:
		mmput(mm);
		info = free_map_info(info);
	}
out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

/**
 * uprobe_unregister_nosync - unregister an already registered probe.
 * @uprobe: uprobe to remove
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	down_write(&uprobe->register_rwsem);
	consumer_del(uprobe, uc);
	err = register_for_each_vma(uprobe, NULL);
	up_write(&uprobe->register_rwsem);

	/* TODO: can't unregister? schedule a worker thread */
	if (unlikely(err)) {
		uprobe_warn(current, "unregister, leaking uprobe");
		return;
	}

	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);

void uprobe_unregister_sync(void)
{
	/*
	 * Now that handler_chain() and handle_uretprobe_chain() iterate over
	 * uprobe->consumers list under RCU protection without holding
	 * uprobe->register_rwsem, we need to wait for RCU grace period to
	 * make sure that we can't call into just unregistered
	 * uprobe_consumer's callbacks anymore. If we don't do that, a fast
	 * and unlucky enough caller can free consumer's memory and cause
	 * handler_chain() or handle_uretprobe_chain() to do a use-after-free.
	 */
	synchronize_rcu_tasks_trace();
	synchronize_srcu(&uretprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);

/**
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @ref_ctr_offset: offset of SDT marker / reference counter
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. the first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return: pointer to the new uprobe on success or an ERR_PTR on failure.
 */
struct uprobe *uprobe_register(struct inode *inode,
				loff_t offset, loff_t ref_ctr_offset,
				struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return ERR_PTR(-EINVAL);

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->read_folio &&
	    !shmem_mapping(inode->i_mapping))
		return ERR_PTR(-EIO);
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return ERR_PTR(-EINVAL);

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return ERR_PTR(-EINVAL);
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return ERR_PTR(-EINVAL);

	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (IS_ERR(uprobe))
		return uprobe;

	down_write(&uprobe->register_rwsem);
	consumer_add(uprobe, uc);
	ret = register_for_each_vma(uprobe, uc);
	up_write(&uprobe->register_rwsem);

	if (ret) {
		uprobe_unregister_nosync(uprobe, uc);
		/*
		 * Registration might have partially succeeded, so we can have
		 * this consumer being called right at this time. We need to
		 * sync here. That's OK, it's an unlikely slow path.
		 */
		uprobe_unregister_sync();
		return ERR_PTR(ret);
	}

	return uprobe;
}
EXPORT_SYMBOL_GPL(uprobe_register);
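
/*
 * Example lifecycle (illustrative sketch of the contract documented above;
 * my_handler, my_uc and the 0x1234 offset are hypothetical):
 *
 *	static struct uprobe_consumer my_uc = {
 *		.handler = my_handler,	// hypothetical breakpoint-hit callback
 *	};
 *
 *	struct uprobe *u = uprobe_register(inode, 0x1234, 0, &my_uc);
 *	if (IS_ERR(u))
 *		return PTR_ERR(u);
 *	...
 *	uprobe_unregister_nosync(u, &my_uc);
 *	uprobe_unregister_sync();	// wait before freeing my_uc
 */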

/**
 * uprobe_apply - add or remove the breakpoints according to @uc->filter
 * @uprobe: uprobe which "owns" the breakpoint
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 * Return: 0 on success or negative error code.
 */
int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	down_write(&uprobe->register_rwsem);

	rcu_read_lock_trace();
	list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		if (con == uc) {
			ret = register_for_each_vma(uprobe, add ? uc : NULL);
			break;
		}
	}
	rcu_read_unlock_trace();

	up_write(&uprobe->register_rwsem);

	return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
	}
	read_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;
	unsigned long vaddr;
	int ret = 0, err = 0;

	mutex_lock(&delayed_uprobe_lock);
	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (du->mm != vma->vm_mm ||
		    !valid_ref_ctr_vma(du->uprobe, vma))
			continue;

		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
		if (ret) {
			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
			if (!err)
				err = ret;
		}
		delayed_uprobe_delete(du);
	}
	mutex_unlock(&delayed_uprobe_lock);
	return err;
}

/*
 * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events())
		return 0;

	if (vma->vm_file &&
	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
		delayed_ref_ctr_inc(vma);

	if (!valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(); this uprobe may already have
	 * been removed. But in this case filter_chain() must return false,
	 * as all consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	read_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
			    struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;

	vmf->page = area->page;
	get_page(vmf->page);
	return 0;
}

static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return -EPERM;
}

static const struct vm_special_mapping xol_mapping = {
	.name = "[uprobes]",
	.fault = xol_fault,
	.mremap = xol_mremap,
};

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(area->vaddr)) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO|
				VM_SEALED_SYSMAP,
				&xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
fail:
	mmap_write_unlock(mm);

	return ret;
}

void * __weak arch_uprobe_trampoline(unsigned long *psize)
{
	static uprobe_opcode_t insn = UPROBE_SWBP_INSN;

	*psize = UPROBE_SWBP_INSN_SIZE;
	return &insn;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long insns_size;
	struct xol_area *area;
	void *insns;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
	if (!area->page)
		goto free_bitmap;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	insns = arch_uprobe_trampoline(&insns_size);
	arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->page);
free_bitmap:
	kfree(area->bitmap);
free_area:
	kfree(area);
out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(NULL, mm);
	mutex_unlock(&delayed_uprobe_lock);

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

static unsigned long xol_get_slot_nr(struct xol_area *area)
{
	unsigned long slot_nr;

	slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
	if (slot_nr < UINSNS_PER_PAGE) {
		if (!test_and_set_bit(slot_nr, area->bitmap))
			return slot_nr;
	}

	return UINSNS_PER_PAGE;
}
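
/*
 * Descriptive note: the lookup above is racy by design. Two threads can
 * both see the same zero bit, but only the test_and_set_bit() winner gets
 * the slot; the loser returns UINSNS_PER_PAGE and the wait_event() in
 * xol_get_insn_slot() simply re-evaluates until a slot is claimed.
 */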

/*
 * xol_get_insn_slot - allocate a slot for xol.
 */
static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask)
{
	struct xol_area *area = get_xol_area();
	unsigned long slot_nr;

	if (!area)
		return false;

	wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE);

	utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
	arch_uprobe_copy_ixol(area->page, utask->xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
	return true;
}

/*
 * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot()
 */
static void xol_free_insn_slot(struct uprobe_task *utask)
{
	struct xol_area *area = current->mm->uprobes_state.xol_area;
	unsigned long offset = utask->xol_vaddr - area->vaddr;
	unsigned int slot_nr;

	utask->xol_vaddr = 0;
	/* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */
	if (WARN_ON_ONCE(offset >= PAGE_SIZE))
		return;

	slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
	clear_bit(slot_nr, area->bitmap);
	smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
	if (waitqueue_active(&area->wq))
		wake_up(&area->wq);
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static void ri_pool_push(struct uprobe_task *utask, struct return_instance *ri)
{
	ri->cons_cnt = 0;
	ri->next = utask->ri_pool;
	utask->ri_pool = ri;
}

static struct return_instance *ri_pool_pop(struct uprobe_task *utask)
{
	struct return_instance *ri = utask->ri_pool;

	if (likely(ri))
		utask->ri_pool = ri->next;

	return ri;
}

static void ri_free(struct return_instance *ri)
{
	kfree(ri->extra_consumers);
	kfree_rcu(ri, rcu);
}

static void free_ret_instance(struct uprobe_task *utask,
			      struct return_instance *ri, bool cleanup_hprobe)
{
	unsigned seq;

	if (cleanup_hprobe) {
		enum hprobe_state hstate;

		(void)hprobe_consume(&ri->hprobe, &hstate);
		hprobe_finalize(&ri->hprobe, hstate);
	}

	/*
	 * At this point the return_instance is unlinked from utask's
	 * return_instances list and this has become visible to ri_timer().
	 * If the seqcount now indicates that ri_timer()'s return instance
	 * processing loop isn't active, we can return ri into the pool of
	 * to-be-reused return instances for future uretprobes. If ri_timer()
	 * happens to be running right now, though, we fall back to safety
	 * and just perform RCU-delayed freeing of ri.
	 * Admittedly, this is a rather simple use of seqcount, but it nicely
	 * abstracts away all the necessary memory barriers, so we use
	 * a well-supported kernel primitive here.
	 */
	if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) {
		/* immediate reuse of ri without RCU GP is OK */
		ri_pool_push(utask, ri);
	} else {
		/* we might be racing with ri_timer(), so play it safe */
		ri_free(ri);
	}
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri, *ri_next;

	if (!utask)
		return;

	t->utask = NULL;
	WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr);

	timer_delete_sync(&utask->ri_timer);

	ri = utask->return_instances;
	while (ri) {
		ri_next = ri->next;
		free_ret_instance(utask, ri, true /* cleanup_hprobe */);
		ri = ri_next;
	}

	/* free_ret_instance() above might add to ri_pool, so this loop should come last */
	ri = utask->ri_pool;
	while (ri) {
		ri_next = ri->next;
		ri_free(ri);
		ri = ri_next;
	}

	kfree(utask);
}

#define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */

#define for_each_ret_instance_rcu(pos, head) \
	for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next))
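
/*
 * Background sketch (descriptive, as this editor reads the code):
 * prepare_uretprobe() hands each return_instance an SRCU-"leased" uprobe
 * reference that survives the switch to user space. Since the probed
 * function may not return for a very long time, ri_timer() fires every
 * RI_TIMER_PERIOD and has hprobe_expire() upgrade any still-pending lease
 * into a proper refcounted reference, so no SRCU read-side section is
 * held for an unbounded amount of time.
 */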

static void ri_timer(struct timer_list *timer)
{
	struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer);
	struct return_instance *ri;

	/* SRCU protects uprobe from reuse for the cmpxchg() inside hprobe_expire(). */
	guard(srcu)(&uretprobes_srcu);
	/* RCU protects return_instance from freeing. */
	guard(rcu)();

	/*
	 * See free_ret_instance() for notes on seqcount use.
	 * We also employ raw API variants to avoid a lockdep false-positive
	 * warning complaining about enabled preemption. The timer can only be
	 * invoked once for a uprobe_task. Therefore there can only be one
	 * writer. The reader does not require an even sequence count to make
	 * progress, so it is OK to remain preemptible on PREEMPT_RT.
	 */
	raw_write_seqcount_begin(&utask->ri_seqcount);

	for_each_ret_instance_rcu(ri, utask->return_instances)
		hprobe_expire(&ri->hprobe, false);

	raw_write_seqcount_end(&utask->ri_seqcount);
}

static struct uprobe_task *alloc_utask(void)
{
	struct uprobe_task *utask;

	utask = kzalloc(sizeof(*utask), GFP_KERNEL);
	if (!utask)
		return NULL;

	timer_setup(&utask->ri_timer, ri_timer, 0);
	seqcount_init(&utask->ri_seqcount);

	return utask;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = alloc_utask();
	return current->utask;
}

static struct return_instance *alloc_return_instance(struct uprobe_task *utask)
{
	struct return_instance *ri;

	ri = ri_pool_pop(utask);
	if (ri)
		return ri;

	ri = kzalloc(sizeof(*ri), GFP_KERNEL);
	if (!ri)
		return ZERO_SIZE_PTR;

	return ri;
}

static struct return_instance *dup_return_instance(struct return_instance *old)
{
	struct return_instance *ri;

	ri = kmemdup(old, sizeof(*ri), GFP_KERNEL);
	if (!ri)
		return NULL;

	if (unlikely(old->cons_cnt > 1)) {
		ri->extra_consumers = kmemdup(old->extra_consumers,
					      sizeof(ri->extra_consumers[0]) * (old->cons_cnt - 1),
					      GFP_KERNEL);
		if (!ri->extra_consumers) {
			kfree(ri);
			return NULL;
		}
	}

	return ri;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;
	struct uprobe *uprobe;

	n_utask = alloc_utask();
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	/* protect uprobes from freeing, we'll need to try_get_uprobe() them */
	guard(srcu)(&uretprobes_srcu);

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = dup_return_instance(o);
		if (!n)
			return -ENOMEM;

		/* if uprobe is non-NULL, we'll have an extra refcount for uprobe */
		uprobe = hprobe_expire(&o->hprobe, true);

		/*
		 * The new utask will have a stable, properly refcounted uprobe
		 * or NULL. Even if we failed to get a refcounted uprobe, we
		 * still need to preserve the full set of return_instances for
		 * proper uretprobe handling and nesting in the forked task.
		 */
		hprobe_init_stable(&n->hprobe, uprobe);

		n->next = NULL;
		rcu_assign_pointer(*p, n);
		p = &n->next;

		n_utask->depth++;
	}

	return 0;
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
	    !fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
}
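
/*
 * Why the task_work dance above (descriptive note): the xol area must live
 * in the child's mm, but at copy_process() time we are still running in the
 * parent. Queuing dup_xol_work() with TWA_RESUME defers __create_xol_area()
 * until the child itself returns to user space, where current->mm is the
 * child's mm and the mapping can be installed at the same dup_xol_addr the
 * parent used.
 */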

/*
 * The current area->vaddr notion assumes the trampoline address is always
 * equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
unsigned long uprobe_get_trampoline_vaddr(void)
{
	unsigned long trampoline_vaddr = UPROBE_NO_TRAMPOLINE_VADDR;
	struct xol_area *area;

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}

static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
				     struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances, *ri_next;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri_next = ri->next;
		rcu_assign_pointer(utask->return_instances, ri_next);
		utask->depth--;

		free_ret_instance(utask, ri, true /* cleanup_hprobe */);
		ri = ri_next;
	}
}

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
			      struct return_instance *ri)
{
	struct uprobe_task *utask = current->utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;
	int srcu_idx;

	if (!get_xol_area())
		goto free;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		goto free;
	}

	trampoline_vaddr = uprobe_get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto free;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep the trampoline address on the stack; rather,
	 * we keep the original return address of the first caller through all
	 * the consequent instances. This also makes breakpoint unwrapping
	 * easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto free;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	/* __srcu_read_lock() because SRCU lock survives switch to user space */
	srcu_idx = __srcu_read_lock(&uretprobes_srcu);

	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;

	hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx);
	ri->next = utask->return_instances;
	rcu_assign_pointer(utask->return_instances, ri);

	mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD);

	return;
free:
	ri_free(ri);
}
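
/*
 * Illustration of the hijack (the x86-64 stack layout is an assumption for
 * this sketch; other architectures may use a link register instead): on
 * function entry the caller's "call" pushed a return address, and
 * arch_uretprobe_hijack_return_addr() rewrites that stack slot:
 *
 *	before:  [sp] = 0x401234  (real caller)
 *	after:   [sp] = trampoline_vaddr, ri->orig_ret_vaddr = 0x401234
 *
 * When the probed function returns, it lands on the trampoline's
 * breakpoint and uprobe_handle_trampoline() restores 0x401234 into the
 * instruction pointer.
 */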

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask = current->utask;
	int err;

	if (!try_get_uprobe(uprobe))
		return -EINVAL;

	if (!xol_get_insn_slot(uprobe, utask)) {
		err = -ENOMEM;
		goto err_out;
	}

	utask->vaddr = bp_vaddr;
	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(utask);
		goto err_out;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
err_out:
	put_uprobe(uprobe);
	return err;
}
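
/*
 * Single-step state machine, as reflected in utask->state (a descriptive
 * summary of the code in this file):
 *
 *	UTASK_RUNNING --(pre_ssout)--------------------------> UTASK_SSTEP
 *	UTASK_SSTEP --(step trap, uprobe_post_sstep_notifier)-> UTASK_SSTEP_ACK
 *	UTASK_SSTEP --(signal, uprobe_deny_signal)------------> UTASK_SSTEP_TRAPPED
 *	UTASK_SSTEP_ACK/TRAPPED --(handle_singlestep)---------> UTASK_RUNNING
 */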

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep. When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip). This
 * is even more important if the task has a handler for SIGSEGV/etc: the
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (task_sigpending(t)) {
		utask->signal_denied = true;
		clear_tsk_thread_flag(t, TIF_SIGPENDING);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
		return -EINVAL;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

static struct uprobe *find_active_uprobe_speculative(unsigned long bp_vaddr)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;
	struct file *vm_file;
	loff_t offset;
	unsigned int seq;

	guard(rcu)();

	if (!mmap_lock_speculate_try_begin(mm, &seq))
		return NULL;

	vma = vma_lookup(mm, bp_vaddr);
	if (!vma)
		return NULL;

	/*
	 * vm_file memory can be reused for another instance of struct file,
	 * but can't be freed from under us, so it's safe to read fields from
	 * it, even if the values are some garbage values; ultimately the
	 * find_uprobe_rcu() + mmap_lock_speculate_retry() check will ensure
	 * that whatever we speculatively found is correct.
	 */
	vm_file = READ_ONCE(vma->vm_file);
	if (!vm_file)
		return NULL;

	offset = (loff_t)(vma->vm_pgoff << PAGE_SHIFT) + (bp_vaddr - vma->vm_start);
	uprobe = find_uprobe_rcu(vm_file->f_inode, offset);
	if (!uprobe)
		return NULL;

	/* now double check that nothing about MM changed */
	if (mmap_lock_speculate_retry(mm, seq))
		return NULL;

	return uprobe;
}

/* assumes being inside RCU protected region */
static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	uprobe = find_active_uprobe_speculative(bp_vaddr);
	if (uprobe)
		return uprobe;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, bp_vaddr);
	if (vma) {
		if (vma->vm_file) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe_rcu(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	mmap_read_unlock(mm);

	return uprobe;
}

static struct return_instance *push_consumer(struct return_instance *ri, __u64 id, __u64 cookie)
{
	struct return_consumer *ric;

	if (unlikely(ri == ZERO_SIZE_PTR))
		return ri;

	if (unlikely(ri->cons_cnt > 0)) {
		ric = krealloc(ri->extra_consumers, sizeof(*ric) * ri->cons_cnt, GFP_KERNEL);
		if (!ric) {
			ri_free(ri);
			return ZERO_SIZE_PTR;
		}
		ri->extra_consumers = ric;
	}

	ric = likely(ri->cons_cnt == 0) ? &ri->consumer : &ri->extra_consumers[ri->cons_cnt - 1];
	ric->id = id;
	ric->cookie = cookie;

	ri->cons_cnt++;
	return ri;
}
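
/*
 * Storage layout example (derived from the code above): the first session
 * consumer is stored inline in ri->consumer; each further one goes into
 * the krealloc'ed ri->extra_consumers[] array. With three consumers:
 *
 *	cons_cnt == 3
 *	ri->consumer           -> consumer #0
 *	ri->extra_consumers[0] -> consumer #1
 *	ri->extra_consumers[1] -> consumer #2
 *
 * which matches the idx == 0 special case in return_consumer_find().
 */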

static struct return_consumer *
return_consumer_find(struct return_instance *ri, int *iter, int id)
{
	struct return_consumer *ric;
	int idx;

	for (idx = *iter; idx < ri->cons_cnt; idx++) {
		ric = likely(idx == 0) ? &ri->consumer : &ri->extra_consumers[idx - 1];
		if (ric->id == id) {
			*iter = idx + 1;
			return ric;
		}
	}

	return NULL;
}

static bool ignore_ret_handler(int rc)
{
	return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE;
}
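
/*
 * Entry-handler return codes consumed by handler_chain() below (assuming
 * the conventional UPROBE_HANDLER_* values of 1 and 2, which is what the
 * "rc < 0 || rc > 2" sanity check implies):
 *
 *	0                      - keep the probe, run ret_handler if any
 *	UPROBE_HANDLER_REMOVE  - unapply this uprobe from current->mm
 *	                         (only honored if every consumer agrees)
 *	UPROBE_HANDLER_IGNORE  - skip this consumer's ret_handler
 */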

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	bool has_consumers = false, remove = true;
	struct return_instance *ri = NULL;
	struct uprobe_task *utask = current->utask;

	utask->auprobe = &uprobe->arch;

	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		bool session = uc->handler && uc->ret_handler;
		__u64 cookie = 0;
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs, &cookie);
			WARN(rc < 0 || rc > 2,
				"bad rc=0x%x from %ps()\n", rc, uc->handler);
		}

		remove &= rc == UPROBE_HANDLER_REMOVE;
		has_consumers = true;

		if (!uc->ret_handler || ignore_ret_handler(rc))
			continue;

		if (!ri)
			ri = alloc_return_instance(utask);

		if (session)
			ri = push_consumer(ri, uc->id, cookie);
	}
	utask->auprobe = NULL;

	if (!ZERO_OR_NULL_PTR(ri))
		prepare_uretprobe(uprobe, regs, ri);

	if (remove && has_consumers) {
		down_read(&uprobe->register_rwsem);

		/* re-check that removal is still required, this time under lock */
		if (!filter_chain(uprobe, current->mm)) {
			WARN_ON(!uprobe_is_active(uprobe));
			unapply_uprobe(uprobe, current->mm);
		}

		up_read(&uprobe->register_rwsem);
	}
}
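
/*
 * Minimal consumer sketch (illustrative only; my_entry/my_ret are
 * assumptions based on the uprobe_consumer callback signatures used in
 * handler_chain() and handle_uretprobe_chain(), not code from this file):
 *
 *	static int my_entry(struct uprobe_consumer *uc,
 *			    struct pt_regs *regs, __u64 *cookie)
 *	{
 *		*cookie = 42;		// stashed for the ret_handler
 *		return 0;		// keep the probe installed
 *	}
 *
 *	static int my_ret(struct uprobe_consumer *uc, unsigned long func,
 *			  struct pt_regs *regs, __u64 *cookie)
 *	{
 *		// *cookie is the value set by my_entry for this invocation
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = {
 *		.handler	= my_entry,
 *		.ret_handler	= my_ret,
 *	};
 *
 * Since both callbacks are set, handler_chain() treats this as a "session"
 * consumer and records the cookie via push_consumer().
 */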

static void
handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_consumer *ric;
	struct uprobe_consumer *uc;
	int ric_idx = 0;

	/* all consumers unsubscribed meanwhile */
	if (unlikely(!uprobe))
		return;

	rcu_read_lock_trace();
	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		bool session = uc->handler && uc->ret_handler;

		if (uc->ret_handler) {
			ric = return_consumer_find(ri, &ric_idx, uc->id);
			if (!session || ric)
				uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
		}
	}
	rcu_read_unlock_trace();
}

static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}
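
/*
 * Chain layout sketch (descriptive, derived from prepare_uretprobe()):
 * "chained" frames are those that found the trampoline address already
 * hijacked and therefore share the outermost frame's orig_ret_vaddr.
 * With return_instances laid out as:
 *
 *	A (chained) -> B (chained) -> C (!chained) -> D -> ...
 *
 * A, B and C form one chain, and find_next_ret_chain(A) returns D, the
 * head of the next chain (or NULL at the end of the list).
 */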

void uprobe_handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *ri_next, *next_chain;
	struct uprobe *uprobe;
	enum hprobe_state hstate;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next_chain = find_next_ret_chain(ri);
		valid = !next_chain || arch_uretprobe_is_alive(next_chain, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			/*
			 * Pop the current instance from the stack of pending
			 * return instances, as it's not pending anymore: we
			 * just fixed up the original instruction pointer in
			 * regs and are about to call the handlers. This allows
			 * fixup_uretprobe_trampoline_entries() to properly fix
			 * up captured stack traces from uretprobe handlers, in
			 * which pending trampoline addresses on the stack are
			 * replaced with the correct original return addresses.
			 */
			ri_next = ri->next;
			rcu_assign_pointer(utask->return_instances, ri_next);
			utask->depth--;

			uprobe = hprobe_consume(&ri->hprobe, &hstate);
			if (valid)
				handle_uretprobe_chain(ri, uprobe, regs);
			hprobe_finalize(&ri->hprobe, hstate);

			/* We already took care of hprobe, no need to waste more time on that. */
			free_ret_instance(utask, ri, false /* !cleanup_hprobe */);
			ri = ri_next;
		} while (ri != next_chain);
	} while (!valid);

	return;

sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig(SIGILL);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				    struct pt_regs *regs)
{
	return true;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int is_swbp;

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == uprobe_get_trampoline_vaddr())
		return uprobe_handle_trampoline(regs);

	rcu_read_lock_trace();

	uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			force_sig(SIGTRAP);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		goto out;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/*
	 * Pairs with the smp_wmb() in prepare_uprobe().
	 *
	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
	 * we must also see the stores to &uprobe->arch performed by the
	 * prepare_uprobe() call.
	 */
	smp_rmb();

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (pre_ssout(uprobe, regs, bp_vaddr))
		goto out;

out:
	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
	rcu_read_unlock_trace();
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(utask);

	if (utask->signal_denied) {
		set_thread_flag(TIF_SIGPENDING);
		utask->signal_denied = false;
	}

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig(SIGILL);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

void __init uprobes_init(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	BUG_ON(register_die_notifier(&uprobe_exception_nb));
}