Lines Matching refs:uprobe
61 struct uprobe { struct
92 struct uprobe *uprobe; member
296 delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm) in delayed_uprobe_check() argument
301 if (du->uprobe == uprobe && du->mm == mm) in delayed_uprobe_check()
306 static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm) in delayed_uprobe_add() argument
310 if (delayed_uprobe_check(uprobe, mm)) in delayed_uprobe_add()
317 du->uprobe = uprobe; in delayed_uprobe_add()
331 static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm) in delayed_uprobe_remove() argument
336 if (!uprobe && !mm) in delayed_uprobe_remove()
342 if (uprobe && du->uprobe != uprobe) in delayed_uprobe_remove()
351 static bool valid_ref_ctr_vma(struct uprobe *uprobe, in valid_ref_ctr_vma() argument
354 unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset); in valid_ref_ctr_vma()
356 return uprobe->ref_ctr_offset && in valid_ref_ctr_vma()
358 file_inode(vma->vm_file) == uprobe->inode && in valid_ref_ctr_vma()
365 find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm) in find_ref_ctr_vma() argument
371 if (valid_ref_ctr_vma(uprobe, tmp)) in find_ref_ctr_vma()
416 static void update_ref_ctr_warn(struct uprobe *uprobe, in update_ref_ctr_warn() argument
421 d > 0 ? "increment" : "decrement", uprobe->inode->i_ino, in update_ref_ctr_warn()
422 (unsigned long long) uprobe->offset, in update_ref_ctr_warn()
423 (unsigned long long) uprobe->ref_ctr_offset, mm); in update_ref_ctr_warn()
426 static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm, in update_ref_ctr() argument
433 rc_vma = find_ref_ctr_vma(uprobe, mm); in update_ref_ctr()
436 rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset); in update_ref_ctr()
439 update_ref_ctr_warn(uprobe, mm, d); in update_ref_ctr()
447 ret = delayed_uprobe_add(uprobe, mm); in update_ref_ctr()
449 delayed_uprobe_remove(uprobe, mm); in update_ref_ctr()
476 struct uprobe *uprobe; in uprobe_write_opcode() local
484 uprobe = container_of(auprobe, struct uprobe, arch); in uprobe_write_opcode()
510 if (!ref_ctr_updated && uprobe->ref_ctr_offset) { in uprobe_write_opcode()
511 ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1); in uprobe_write_opcode()
570 update_ref_ctr(uprobe, mm, -1); in uprobe_write_opcode()
610 static struct uprobe *get_uprobe(struct uprobe *uprobe) in get_uprobe() argument
612 refcount_inc(&uprobe->ref); in get_uprobe()
613 return uprobe; in get_uprobe()
622 static struct uprobe *try_get_uprobe(struct uprobe *uprobe) in try_get_uprobe() argument
624 if (refcount_inc_not_zero(&uprobe->ref)) in try_get_uprobe()
625 return uprobe; in try_get_uprobe()
629 static inline bool uprobe_is_active(struct uprobe *uprobe) in uprobe_is_active() argument
631 return !RB_EMPTY_NODE(&uprobe->rb_node); in uprobe_is_active()
636 struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); in uprobe_free_rcu_tasks_trace() local
638 kfree(uprobe); in uprobe_free_rcu_tasks_trace()
643 struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); in uprobe_free_srcu() local
645 call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace); in uprobe_free_srcu()
650 struct uprobe *uprobe = container_of(work, struct uprobe, work); in uprobe_free_deferred() local
654 if (uprobe_is_active(uprobe)) { in uprobe_free_deferred()
656 rb_erase(&uprobe->rb_node, &uprobes_tree); in uprobe_free_deferred()
668 delayed_uprobe_remove(uprobe, NULL); in uprobe_free_deferred()
672 call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu); in uprobe_free_deferred()
675 static void put_uprobe(struct uprobe *uprobe) in put_uprobe() argument
677 if (!refcount_dec_and_test(&uprobe->ref)) in put_uprobe()
680 INIT_WORK(&uprobe->work, uprobe_free_deferred); in put_uprobe()
681 schedule_work(&uprobe->work); in put_uprobe()
685 static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx) in hprobe_init_leased() argument
687 WARN_ON(!uprobe); in hprobe_init_leased()
689 hprobe->uprobe = uprobe; in hprobe_init_leased()
694 static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe) in hprobe_init_stable() argument
696 hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE; in hprobe_init_stable()
697 hprobe->uprobe = uprobe; in hprobe_init_stable()
710 static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate) in hprobe_consume()
716 return hprobe->uprobe; in hprobe_consume()
738 put_uprobe(hprobe->uprobe); in hprobe_finalize()
765 static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get) in hprobe_expire()
783 return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe; in hprobe_expire()
797 struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe); in hprobe_expire() local
805 if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) { in hprobe_expire()
808 return get ? get_uprobe(uprobe) : uprobe; in hprobe_expire()
815 if (uprobe && !get) in hprobe_expire()
816 put_uprobe(uprobe); in hprobe_expire()
825 return uprobe; in hprobe_expire()
835 const struct uprobe *r) in uprobe_cmp()
853 rb_entry((node), struct uprobe, rb_node)
868 struct uprobe *u = __node_2_uprobe(a); in __uprobe_cmp()
876 static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset) in find_uprobe_rcu()
920 static struct uprobe *__insert_uprobe(struct uprobe *uprobe) in __insert_uprobe() argument
924 node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp); in __insert_uprobe()
926 struct uprobe *u = __node_2_uprobe(node); in __insert_uprobe()
937 return uprobe; in __insert_uprobe()
944 static struct uprobe *insert_uprobe(struct uprobe *uprobe) in insert_uprobe() argument
946 struct uprobe *u; in insert_uprobe()
950 u = __insert_uprobe(uprobe); in insert_uprobe()
958 ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe) in ref_ctr_mismatch_warn() argument
962 uprobe->inode->i_ino, (unsigned long long) uprobe->offset, in ref_ctr_mismatch_warn()
964 (unsigned long long) uprobe->ref_ctr_offset); in ref_ctr_mismatch_warn()
967 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, in alloc_uprobe()
970 struct uprobe *uprobe, *cur_uprobe; in alloc_uprobe() local
972 uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL); in alloc_uprobe()
973 if (!uprobe) in alloc_uprobe()
976 uprobe->inode = inode; in alloc_uprobe()
977 uprobe->offset = offset; in alloc_uprobe()
978 uprobe->ref_ctr_offset = ref_ctr_offset; in alloc_uprobe()
979 INIT_LIST_HEAD(&uprobe->consumers); in alloc_uprobe()
980 init_rwsem(&uprobe->register_rwsem); in alloc_uprobe()
981 init_rwsem(&uprobe->consumer_rwsem); in alloc_uprobe()
982 RB_CLEAR_NODE(&uprobe->rb_node); in alloc_uprobe()
983 refcount_set(&uprobe->ref, 1); in alloc_uprobe()
986 cur_uprobe = insert_uprobe(uprobe); in alloc_uprobe()
988 if (cur_uprobe != uprobe) { in alloc_uprobe()
989 if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) { in alloc_uprobe()
990 ref_ctr_mismatch_warn(cur_uprobe, uprobe); in alloc_uprobe()
992 kfree(uprobe); in alloc_uprobe()
995 kfree(uprobe); in alloc_uprobe()
996 uprobe = cur_uprobe; in alloc_uprobe()
999 return uprobe; in alloc_uprobe()
1002 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) in consumer_add() argument
1006 down_write(&uprobe->consumer_rwsem); in consumer_add()
1007 list_add_rcu(&uc->cons_node, &uprobe->consumers); in consumer_add()
1009 up_write(&uprobe->consumer_rwsem); in consumer_add()
1016 static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc) in consumer_del() argument
1018 down_write(&uprobe->consumer_rwsem); in consumer_del()
1020 up_write(&uprobe->consumer_rwsem); in consumer_del()
1045 static int copy_insn(struct uprobe *uprobe, struct file *filp) in copy_insn() argument
1047 struct address_space *mapping = uprobe->inode->i_mapping; in copy_insn()
1048 loff_t offs = uprobe->offset; in copy_insn()
1049 void *insn = &uprobe->arch.insn; in copy_insn()
1050 int size = sizeof(uprobe->arch.insn); in copy_insn()
1055 if (offs >= i_size_read(uprobe->inode)) in copy_insn()
1071 static int prepare_uprobe(struct uprobe *uprobe, struct file *file, in prepare_uprobe() argument
1076 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) in prepare_uprobe()
1080 down_write(&uprobe->consumer_rwsem); in prepare_uprobe()
1081 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) in prepare_uprobe()
1084 ret = copy_insn(uprobe, file); in prepare_uprobe()
1089 if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn)) in prepare_uprobe()
1092 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); in prepare_uprobe()
1097 set_bit(UPROBE_COPY_INSN, &uprobe->flags); in prepare_uprobe()
1100 up_write(&uprobe->consumer_rwsem); in prepare_uprobe()
1110 static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm) in filter_chain() argument
1115 down_read(&uprobe->consumer_rwsem); in filter_chain()
1116 list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { in filter_chain()
1121 up_read(&uprobe->consumer_rwsem); in filter_chain()
1127 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, in install_breakpoint() argument
1133 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); in install_breakpoint()
1145 ret = set_swbp(&uprobe->arch, mm, vaddr); in install_breakpoint()
1155 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) in remove_breakpoint() argument
1158 return set_orig_insn(&uprobe->arch, mm, vaddr); in remove_breakpoint()
1245 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) in register_for_each_vma() argument
1252 info = build_map_info(uprobe->inode->i_mapping, in register_for_each_vma()
1253 uprobe->offset, is_register); in register_for_each_vma()
1278 file_inode(vma->vm_file) != uprobe->inode) in register_for_each_vma()
1282 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) in register_for_each_vma()
1288 err = install_breakpoint(uprobe, mm, vma, info->vaddr); in register_for_each_vma()
1290 if (!filter_chain(uprobe, mm)) in register_for_each_vma()
1291 err |= remove_breakpoint(uprobe, mm, info->vaddr); in register_for_each_vma()
1310 void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc) in uprobe_unregister_nosync() argument
1314 down_write(&uprobe->register_rwsem); in uprobe_unregister_nosync()
1315 consumer_del(uprobe, uc); in uprobe_unregister_nosync()
1316 err = register_for_each_vma(uprobe, NULL); in uprobe_unregister_nosync()
1317 up_write(&uprobe->register_rwsem); in uprobe_unregister_nosync()
1325 put_uprobe(uprobe); in uprobe_unregister_nosync()
1363 struct uprobe *uprobe_register(struct inode *inode, in uprobe_register()
1367 struct uprobe *uprobe; in uprobe_register() local
1391 uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); in uprobe_register()
1392 if (IS_ERR(uprobe)) in uprobe_register()
1393 return uprobe; in uprobe_register()
1395 down_write(&uprobe->register_rwsem); in uprobe_register()
1396 consumer_add(uprobe, uc); in uprobe_register()
1397 ret = register_for_each_vma(uprobe, uc); in uprobe_register()
1398 up_write(&uprobe->register_rwsem); in uprobe_register()
1401 uprobe_unregister_nosync(uprobe, uc); in uprobe_register()
1411 return uprobe; in uprobe_register()
1422 int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add) in uprobe_apply() argument
1427 down_write(&uprobe->register_rwsem); in uprobe_apply()
1430 list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { in uprobe_apply()
1432 ret = register_for_each_vma(uprobe, add ? uc : NULL); in uprobe_apply()
1438 up_write(&uprobe->register_rwsem); in uprobe_apply()
1443 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) in unapply_uprobe() argument
1455 file_inode(vma->vm_file) != uprobe->inode) in unapply_uprobe()
1459 if (uprobe->offset < offset || in unapply_uprobe()
1460 uprobe->offset >= offset + vma->vm_end - vma->vm_start) in unapply_uprobe()
1463 vaddr = offset_to_vaddr(vma, uprobe->offset); in unapply_uprobe()
1464 err |= remove_breakpoint(uprobe, mm, vaddr); in unapply_uprobe()
1477 struct uprobe *u = rb_entry(n, struct uprobe, rb_node); in find_node_in_range()
1506 struct uprobe *u; in build_probe_list()
1516 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1524 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1548 !valid_ref_ctr_vma(du->uprobe, vma)) in delayed_ref_ctr_inc()
1551 vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); in delayed_ref_ctr_inc()
1554 update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); in delayed_ref_ctr_inc()
1573 struct uprobe *uprobe, *u; in uprobe_mmap() local
1598 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { in uprobe_mmap()
1600 filter_chain(uprobe, vma->vm_mm)) { in uprobe_mmap()
1601 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); in uprobe_mmap()
1602 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); in uprobe_mmap()
1604 put_uprobe(uprobe); in uprobe_mmap()
1831 static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask) in xol_get_insn_slot() argument
1843 &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); in xol_get_insn_slot()
2084 struct uprobe *uprobe; in dup_utask() local
2101 uprobe = hprobe_expire(&o->hprobe, true); in dup_utask()
2109 hprobe_init_stable(&n->hprobe, uprobe); in dup_utask()
2199 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, in prepare_uretprobe() argument
2253 hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx); in prepare_uretprobe()
2266 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) in pre_ssout() argument
2271 if (!try_get_uprobe(uprobe)) in pre_ssout()
2274 if (!xol_get_insn_slot(uprobe, utask)) { in pre_ssout()
2280 err = arch_uprobe_pre_xol(&uprobe->arch, regs); in pre_ssout()
2286 utask->active_uprobe = uprobe; in pre_ssout()
2290 put_uprobe(uprobe); in pre_ssout()
2375 static struct uprobe *find_active_uprobe_speculative(unsigned long bp_vaddr) in find_active_uprobe_speculative()
2378 struct uprobe *uprobe = NULL; in find_active_uprobe_speculative() local
2405 uprobe = find_uprobe_rcu(vm_file->f_inode, offset); in find_active_uprobe_speculative()
2406 if (!uprobe) in find_active_uprobe_speculative()
2413 return uprobe; in find_active_uprobe_speculative()
2417 static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp) in find_active_uprobe_rcu()
2420 struct uprobe *uprobe = NULL; in find_active_uprobe_rcu() local
2423 uprobe = find_active_uprobe_speculative(bp_vaddr); in find_active_uprobe_rcu()
2424 if (uprobe) in find_active_uprobe_rcu()
2425 return uprobe; in find_active_uprobe_rcu()
2434 uprobe = find_uprobe_rcu(inode, offset); in find_active_uprobe_rcu()
2437 if (!uprobe) in find_active_uprobe_rcu()
2443 if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) in find_active_uprobe_rcu()
2447 return uprobe; in find_active_uprobe_rcu()
2497 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) in handler_chain() argument
2504 utask->auprobe = &uprobe->arch; in handler_chain()
2506 list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { in handler_chain()
2532 prepare_uretprobe(uprobe, regs, ri); in handler_chain()
2535 down_read(&uprobe->register_rwsem); in handler_chain()
2538 if (!filter_chain(uprobe, current->mm)) { in handler_chain()
2539 WARN_ON(!uprobe_is_active(uprobe)); in handler_chain()
2540 unapply_uprobe(uprobe, current->mm); in handler_chain()
2543 up_read(&uprobe->register_rwsem); in handler_chain()
2548 handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs) in handle_uretprobe_chain() argument
2555 if (unlikely(!uprobe)) in handle_uretprobe_chain()
2559 list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { in handle_uretprobe_chain()
2587 struct uprobe *uprobe; in uprobe_handle_trampoline() local
2623 uprobe = hprobe_consume(&ri->hprobe, &hstate); in uprobe_handle_trampoline()
2625 handle_uretprobe_chain(ri, uprobe, regs); in uprobe_handle_trampoline()
2658 struct uprobe *uprobe; in handle_swbp() local
2668 uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp); in handle_swbp()
2669 if (!uprobe) { in handle_swbp()
2695 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) in handle_swbp()
2711 if (arch_uprobe_ignore(&uprobe->arch, regs)) in handle_swbp()
2714 handler_chain(uprobe, regs); in handle_swbp()
2716 if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) in handle_swbp()
2719 if (pre_ssout(uprobe, regs, bp_vaddr)) in handle_swbp()
2733 struct uprobe *uprobe; in handle_singlestep() local
2736 uprobe = utask->active_uprobe; in handle_singlestep()
2738 err = arch_uprobe_post_xol(&uprobe->arch, regs); in handle_singlestep()
2740 arch_uprobe_abort_xol(&uprobe->arch, regs); in handle_singlestep()
2744 put_uprobe(uprobe); in handle_singlestep()
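
The hits above surface both the public registration surface (uprobe_register, uprobe_apply, uprobe_unregister_nosync) and the internal plumbing it drives (alloc_uprobe, consumer_add, register_for_each_vma, put_uprobe). As an illustration only, the sketch below shows how a hypothetical in-kernel consumer might attach to an inode:offset pair through those entry points. It is not taken from the listing: the uprobe_consumer callback prototype and the uprobe_unregister_sync() pairing are assumptions based on recent kernel trees and vary between versions.

/*
 * Hypothetical sketch (not part of the listing): register one uprobe
 * consumer on an inode:offset pair, then tear it down. Callback and
 * registration signatures are assumptions matching recent kernels.
 */
#include <linux/err.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

static int demo_handler(struct uprobe_consumer *self, struct pt_regs *regs,
			__u64 *data)
{
	/* Return 0 to keep the probe installed for this task;
	 * UPROBE_HANDLER_REMOVE asks the core to unapply it. */
	return 0;
}

static struct uprobe_consumer demo_uc = {
	.handler = demo_handler,
};

static struct uprobe *demo_uprobe;

static int demo_attach(struct inode *inode, loff_t offset)
{
	/* ref_ctr_offset == 0: no SDT reference counter (semaphore) to bump. */
	demo_uprobe = uprobe_register(inode, offset, 0, &demo_uc);
	return PTR_ERR_OR_ZERO(demo_uprobe);
}

static void demo_detach(void)
{
	uprobe_unregister_nosync(demo_uprobe, &demo_uc);
	/* Assumed pairing: wait for in-flight handlers to drain before
	 * freeing demo_uc or unloading this code. */
	uprobe_unregister_sync();
}

Note how this mirrors the listing: uprobe_register() allocates or reuses the struct uprobe via alloc_uprobe()/insert_uprobe(), adds the consumer under register_rwsem, and walks every mapping with register_for_each_vma(); uprobe_unregister_nosync() removes the consumer and drops the registration reference with put_uprobe().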