Lines matching "area", "color" and "enable" in mm/kmemleak.c (Linux kernel)
1 // SPDX-License-Identifier: GPL-2.0-only
9 * Documentation/dev-tools/kmemleak.rst.
12 * ----------------
16 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
21 * red black trees used to look up metadata based on a pointer to the corresponding memory block.
26 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
33 * - scan_mutex (mutex): ensures that only one thread may scan the memory for unreferenced objects at a time.
46 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
48 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex regions.
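A minimal sketch (not from the matched lines) of the nesting order described
above, assuming the scan thread already holds scan_mutex and "object" is the
object currently being scanned:

	raw_spin_lock_irq(&object->lock);	/* object being scanned */
	raw_spin_lock(&kmemleak_lock);		/* pointer value look-up */
	/* a second object's lock nests under the scanned object's lock */
	raw_spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
	raw_spin_unlock(&other_object->lock);
	raw_spin_unlock(&kmemleak_lock);
	raw_spin_unlock_irq(&object->lock);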
117 /* scanning area inside a memory block */
125 #define KMEMLEAK_BLACK -1
130 * object->lock. Insertions or deletions from object_list, gray_list or
132 * the notes on locking above). These objects are reference-counted
173 /* flag set for per-CPU pointers */
193 /* the list of gray-colored objects (see color_gray comment below) */
227 /* minimum and maximum addresses that may be valid per-CPU pointers */
293 * with the object->lock held.
298 const u8 *ptr = (const u8 *)object->pointer; in hex_dump_object()
301 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS)) in hex_dump_object()
304 if (object->flags & OBJECT_PERCPU) in hex_dump_object()
305 ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer); in hex_dump_object()
308 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); in hex_dump_object()
310 if (object->flags & OBJECT_PERCPU) in hex_dump_object()
323 * - white - orphan object, not enough references to it (count < min_count)
324 * - gray - not orphan, not marked as false positive (min_count == 0) or with sufficient references to it (count >= min_count)
326 * - black - ignore, it doesn't contain references (e.g. text section)
327 * (min_count == -1). No function defined for this color.
328 * Newly created objects don't have any color assigned (object->count == -1)
333 return object->count != KMEMLEAK_BLACK && in color_white()
334 object->count < object->min_count; in color_white()
339 return object->min_count != KMEMLEAK_BLACK && in color_gray()
340 object->count >= object->min_count; in color_gray()
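A worked example, assuming the two predicates above: a kmalloc'ed object is
registered with min_count == 1.

	object->min_count = 1;	/* typical for kmalloc'ed objects */
	object->count = 0;	/* a scan found no pointers to the block */
	/* color_white() is true: orphan, candidate for leak reporting */
	object->count = 1;	/* a scan found one pointer to the block */
	/* color_gray() is true: referenced, its memory is scanned in turn */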
344 * Objects are considered unreferenced only if their color is white, they have not been deleted (still allocated) and have a minimum age, to avoid false positives caused by pointers temporarily stored in CPU registers.
350 return (color_white(object) && object->flags & OBJECT_ALLOCATED) && in unreferenced_object()
351 time_before_eq(object->jiffies + jiffies_min_age, in unreferenced_object()
357 * print_unreferenced function must be called with the object->lock held.
366 nr_entries = stack_depot_fetch(object->trace_handle, &entries); in print_unreferenced()
368 object->pointer, object->size); in print_unreferenced()
370 object->comm, object->pid, object->jiffies); in print_unreferenced()
372 warn_or_seq_printf(seq, " backtrace (crc %x):\n", object->checksum); in print_unreferenced()
383 * the object->lock held.
388 object->pointer, object->size); in dump_object_info()
390 object->comm, object->pid, object->jiffies); in dump_object_info()
391 pr_notice(" min_count = %d\n", object->min_count); in dump_object_info()
392 pr_notice(" count = %d\n", object->count); in dump_object_info()
393 pr_notice(" flags = 0x%x\n", object->flags); in dump_object_info()
394 pr_notice(" checksum = %u\n", object->checksum); in dump_object_info()
396 if (object->trace_handle) in dump_object_info()
397 stack_depot_print(object->trace_handle); in dump_object_info()
410 * Look up a memory block's metadata (kmemleak_object) in the object search tree based on a pointer value.
418 struct rb_node *rb = object_tree(objflags)->rb_node; in __lookup_object()
426 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); in __lookup_object()
429 rb = object->rb_node.rb_left; in __lookup_object()
430 else if (untagged_objp + object->size <= untagged_ptr) in __lookup_object()
431 rb = object->rb_node.rb_right; in __lookup_object()
444 /* Look up a kmemleak object which was allocated with a virtual address. */
458 return atomic_inc_not_zero(&object->use_count); in get_object()
482 list_del(&object->object_list); in mem_pool_alloc()
484 object = &mem_pool[--mem_pool_free_count]; in mem_pool_alloc()
506 list_add(&object->object_list, &mem_pool_free_list); in mem_pool_free()
516 struct kmemleak_scan_area *area; in free_object_rcu() local
524 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
525 hlist_del(&area->node); in free_object_rcu()
526 kmem_cache_free(scan_area_cache, area); in free_object_rcu()
533 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
534 * delete_object() path, the delayed RCU freeing ensures that there is no
535 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
540 if (!atomic_dec_and_test(&object->use_count)) in put_object()
544 WARN_ON(object->flags & OBJECT_ALLOCATED); in put_object()
552 call_rcu(&object->rcu, free_object_rcu); in put_object()
554 free_object_rcu(&object->rcu); in put_object()
591 rb_erase(&object->rb_node, object_tree(object->flags)); in __remove_object()
592 if (!(object->del_state & DELSTATE_NO_DELETE)) in __remove_object()
593 list_del_rcu(&object->object_list); in __remove_object()
594 object->del_state |= DELSTATE_REMOVED; in __remove_object()
658 INIT_LIST_HEAD(&object->object_list); in __alloc_object()
659 INIT_LIST_HEAD(&object->gray_list); in __alloc_object()
660 INIT_HLIST_HEAD(&object->area_list); in __alloc_object()
661 raw_spin_lock_init(&object->lock); in __alloc_object()
662 atomic_set(&object->use_count, 1); in __alloc_object()
663 object->excess_ref = 0; in __alloc_object()
664 object->count = 0; /* white color initially */ in __alloc_object()
665 object->checksum = 0; in __alloc_object()
666 object->del_state = 0; in __alloc_object()
670 object->pid = 0; in __alloc_object()
671 strscpy(object->comm, "hardirq"); in __alloc_object()
673 object->pid = 0; in __alloc_object()
674 strscpy(object->comm, "softirq"); in __alloc_object()
676 object->pid = current->pid; in __alloc_object()
680 * dependency issues with current->alloc_lock. In the worst in __alloc_object()
683 strscpy(object->comm, current->comm); in __alloc_object()
687 object->trace_handle = set_track_prepare(); in __alloc_object()
701 object->flags = OBJECT_ALLOCATED | objflags; in __link_object()
702 object->pointer = ptr; in __link_object()
703 object->size = kfence_ksize((void *)ptr) ?: size; in __link_object()
704 object->min_count = min_count; in __link_object()
705 object->jiffies = jiffies; in __link_object()
710 * address, and update min_percpu_addr/max_percpu_addr for per-CPU pointers. in __link_object()
720 link = &object_tree(objflags)->rb_node; in __link_object()
725 untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer); in __link_object()
727 link = &parent->rb_node.rb_left; in __link_object()
728 else if (untagged_objp + parent->size <= untagged_ptr) in __link_object()
729 link = &parent->rb_node.rb_right; in __link_object()
734 * No need for parent->lock here since "parent" cannot be freed while the kmemleak_lock is held. in __link_object()
738 return -EEXIST; in __link_object()
741 rb_link_node(&object->rb_node, rb_parent, link); in __link_object()
742 rb_insert_color(&object->rb_node, object_tree(objflags)); in __link_object()
743 list_add_tail_rcu(&object->object_list, &object_list); in __link_object()
784 /* Create kmemleak object corresponding to a per-CPU allocation. */
798 WARN_ON(!(object->flags & OBJECT_ALLOCATED)); in __delete_object()
799 WARN_ON(atomic_read(&object->use_count) < 1); in __delete_object()
805 raw_spin_lock_irqsave(&object->lock, flags); in __delete_object()
806 object->flags &= ~OBJECT_ALLOCATED; in __delete_object()
807 raw_spin_unlock_irqrestore(&object->lock, flags); in __delete_object()
864 start = object->pointer; in delete_object_part()
865 end = object->pointer + object->size; in delete_object_part()
867 !__link_object(object_l, start, ptr - start, in delete_object_part()
868 object->min_count, objflags)) in delete_object_part()
871 !__link_object(object_r, ptr + size, end - ptr - size, in delete_object_part()
872 object->min_count, objflags)) in delete_object_part()
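For example (illustrative): if the object spans [start, end) and the caller
frees [ptr, ptr + size), the two __link_object() calls above re-register the
surviving ranges [start, ptr) and [ptr + size, end) as separate kmemleak
objects.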
887 static void __paint_it(struct kmemleak_object *object, int color) in __paint_it() argument
889 object->min_count = color; in __paint_it()
890 if (color == KMEMLEAK_BLACK) in __paint_it()
891 object->flags |= OBJECT_NO_SCAN; in __paint_it()
894 static void paint_it(struct kmemleak_object *object, int color) in paint_it() argument
898 raw_spin_lock_irqsave(&object->lock, flags); in paint_it()
899 __paint_it(object, color); in paint_it()
900 raw_spin_unlock_irqrestore(&object->lock, flags); in paint_it()
903 static void paint_ptr(unsigned long ptr, int color, unsigned int objflags) in paint_ptr() argument
909 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n", in paint_ptr()
911 (color == KMEMLEAK_GREY) ? "Grey" : in paint_ptr()
912 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown"); in paint_ptr()
915 paint_it(object, color); in paint_ptr()
920 * Mark an object permanently as gray-colored so that it can no longer be reported as a leak; this is used in general to mark a known false positive.
929 * Mark the object as black-colored so that it is ignored from scans and reporting.
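The bodies of these two helpers did not match the search terms; in the kernel
sources they are thin wrappers around paint_ptr(), approximately:

static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, 0);
}

static void make_black_object(unsigned long ptr, unsigned int objflags)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}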
953 raw_spin_lock_irqsave(&object->lock, flags); in reset_checksum()
954 object->checksum = 0; in reset_checksum()
955 raw_spin_unlock_irqrestore(&object->lock, flags); in reset_checksum()
960 * Add a scanning area to the object. If at least one such area is added, only these areas will be scanned during memory leak detection.
967 struct kmemleak_scan_area *area = NULL; in add_scan_area() local
973 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n", in add_scan_area()
979 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); in add_scan_area()
982 area = kmem_cache_alloc_noprof(scan_area_cache, in add_scan_area()
985 raw_spin_lock_irqsave(&object->lock, flags); in add_scan_area()
986 if (!area) { in add_scan_area()
987 pr_warn_once("Cannot allocate a scan area, scanning the full object\n"); in add_scan_area()
989 object->flags |= OBJECT_FULL_SCAN; in add_scan_area()
993 size = untagged_objp + object->size - untagged_ptr; in add_scan_area()
994 } else if (untagged_ptr + size > untagged_objp + object->size) { in add_scan_area()
995 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); in add_scan_area()
997 kmem_cache_free(scan_area_cache, area); in add_scan_area()
1001 INIT_HLIST_NODE(&area->node); in add_scan_area()
1002 area->start = ptr; in add_scan_area()
1003 area->size = size; in add_scan_area()
1005 hlist_add_head(&area->node, &object->area_list); in add_scan_area()
1007 raw_spin_unlock_irqrestore(&object->lock, flags); in add_scan_area()
1029 raw_spin_lock_irqsave(&object->lock, flags); in object_set_excess_ref()
1030 object->excess_ref = excess_ref; in object_set_excess_ref()
1031 raw_spin_unlock_irqrestore(&object->lock, flags); in object_set_excess_ref()
1051 raw_spin_lock_irqsave(&object->lock, flags); in object_no_scan()
1052 object->flags |= OBJECT_NO_SCAN; in object_no_scan()
1053 raw_spin_unlock_irqrestore(&object->lock, flags); in object_no_scan()
1058 * kmemleak_alloc - register a newly allocated object
1064 * the object is never reported as a leak. If @min_count is -1, the object is ignored (not scanned and not reported as a leak).
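A usage sketch (the gen_pool-backed helper is hypothetical; the
kmemleak_alloc()/kmemleak_free() pairing itself is documented in
Documentation/dev-tools/kmemleak.rst): allocators that bypass the slab
allocator must register their objects explicitly.

void *my_pool_alloc(struct gen_pool *pool, size_t size)
{
	void *obj = (void *)gen_pool_alloc(pool, size);

	/* min_count == 1: report the object if no references are found */
	if (obj)
		kmemleak_alloc(obj, size, 1, GFP_KERNEL);
	return obj;
}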
1082 * kmemleak_alloc_percpu - register a newly allocated __percpu object
1101 * kmemleak_vmalloc - register a newly vmalloc'ed object
1102 * @area: pointer to vm_struct
1109 void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp) in kmemleak_vmalloc() argument
1111 pr_debug("%s(0x%px, %zu)\n", __func__, area, size); in kmemleak_vmalloc()
1118 create_object((unsigned long)area->addr, size, 2, gfp); in kmemleak_vmalloc()
1119 object_set_excess_ref((unsigned long)area, in kmemleak_vmalloc()
1120 (unsigned long)area->addr); in kmemleak_vmalloc()
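A note on the arguments above: min_count == 2 reflects that the vm_struct
itself always holds one reference to area->addr, so a second, external
reference is required for the block not to be reported; object_set_excess_ref()
additionally makes a pointer to the vm_struct count as a reference to
area->addr during scanning.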
1126 * kmemleak_free - unregister a previously registered object
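The matching unregistration for the pool sketch above (the gen_pool names are
again hypothetical):

void my_pool_free(struct gen_pool *pool, void *obj, size_t size)
{
	kmemleak_free(obj);	/* unregister before returning the memory */
	gen_pool_free(pool, (unsigned long)obj, size);
}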
1142 * kmemleak_free_part - partially unregister a previously registered object
1160 * kmemleak_free_percpu - unregister a previously registered __percpu object
1176 * kmemleak_update_trace - update object allocation stack trace
1203 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_update_trace()
1204 object->trace_handle = trace_handle; in kmemleak_update_trace()
1205 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_update_trace()
1212 * kmemleak_not_leak - mark an allocated object as a false positive
1228 * kmemleak_transient_leak - mark an allocated object as a transient false positive
1233 * is part of a singly linked list and the ->next reference to it is changed.
1245 * kmemleak_ignore - ignore an allocated object
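The difference between the two annotations above, as short calls (both appear
in this form in kmemleak.rst; ptr is an already registered object):

	kmemleak_not_leak(ptr);	/* paint gray: never reported, still scanned */
	kmemleak_ignore(ptr);	/* paint black: neither scanned nor reported */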
1263 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1265 * @ptr: pointer to the beginning of or inside the object; this also represents the start of the scan area
1266 * @size: size of the scan area
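A hedged usage sketch (struct and size names hypothetical): restrict scanning
of a large buffer to the header that actually stores pointers, since once a
scan area is added only that range is scanned.

	struct my_buf *buf = kmalloc(sizeof(*buf) + PAYLOAD_SIZE, GFP_KERNEL);

	/* only the header contains pointers; skip the raw payload */
	if (buf)
		kmemleak_scan_area(buf, sizeof(*buf), GFP_KERNEL);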
1283 * kmemleak_no_scan - do not scan an allocated object
1301 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical address argument
1321 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a physical address argument
1337 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical address argument
1355 u32 old_csum = object->checksum; in update_checksum()
1357 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS)) in update_checksum()
1362 if (object->flags & OBJECT_PERCPU) { in update_checksum()
1365 object->checksum = 0; in update_checksum()
1367 void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu); in update_checksum()
1369 object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size); in update_checksum()
1372 object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size); in update_checksum()
1377 return object->checksum != old_csum; in update_checksum()
1381 * Update an object's references. object->lock must be held by the caller.
1386 /* non-orphan, ignored or new */ in update_refs()
1393 * object's color will become gray and it will be added to the gray_list. in update_refs()
1396 object->count++; in update_refs()
1400 list_add_tail(&object->gray_list, &gray_list); in update_refs()
1422 * object->use_count cannot be dropped to 0 while the object in pointer_update_refs()
1434 * Avoid the lockdep recursive warning on object->lock being in pointer_update_refs()
1438 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in pointer_update_refs()
1441 excess_ref = object->excess_ref; in pointer_update_refs()
1447 raw_spin_unlock(&object->lock); in pointer_update_refs()
1456 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in pointer_update_refs()
1458 raw_spin_unlock(&object->lock); in pointer_update_refs()
1475 if (current->mm) in scan_should_stop()
1492 unsigned long *end = _end - (BYTES_PER_POINTER - 1); in scan_block()
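The adjustment above keeps every pointer-sized load inside the block: with
8-byte pointers, the last word scanned starts 7 bytes before _end, so a full
unsigned long read never crosses the end of the range.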
1531 * that object->use_count >= 1.
1535 struct kmemleak_scan_area *area; in scan_object() local
1539 * Once the object->lock is acquired, the corresponding memory block in scan_object()
1542 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1543 if (object->flags & OBJECT_NO_SCAN) in scan_object()
1545 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1549 if (object->flags & OBJECT_PERCPU) { in scan_object()
1553 void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu); in scan_object()
1554 void *end = start + object->size; in scan_object()
1558 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1560 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1561 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1564 } else if (hlist_empty(&object->area_list) || in scan_object()
1565 object->flags & OBJECT_FULL_SCAN) { in scan_object()
1566 void *start = object->flags & OBJECT_PHYS ? in scan_object()
1567 __va((phys_addr_t)object->pointer) : in scan_object()
1568 (void *)object->pointer; in scan_object()
1569 void *end = start + object->size; in scan_object()
1580 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1582 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1583 } while (object->flags & OBJECT_ALLOCATED); in scan_object()
1585 hlist_for_each_entry(area, &object->area_list, node) in scan_object()
1586 scan_block((void *)area->start, in scan_object()
1587 (void *)(area->start + area->size), in scan_object()
1591 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1608 while (&object->gray_list != &gray_list) { in scan_gray_list()
1615 tmp = list_entry(object->gray_list.next, typeof(*object), in scan_gray_list()
1619 list_del(&object->gray_list); in scan_gray_list()
1638 if (object->del_state & DELSTATE_REMOVED) in kmemleak_cond_resched()
1640 object->del_state |= DELSTATE_NO_DELETE; in kmemleak_cond_resched()
1648 if (object->del_state & DELSTATE_REMOVED) in kmemleak_cond_resched()
1649 list_del_rcu(&object->object_list); in kmemleak_cond_resched()
1650 object->del_state &= ~DELSTATE_NO_DELETE; in kmemleak_cond_resched()
1673 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1679 if (atomic_read(&object->use_count) > 1) { in kmemleak_scan()
1680 pr_debug("object->use_count = %d\n", in kmemleak_scan()
1681 atomic_read(&object->use_count)); in kmemleak_scan()
1687 if ((object->flags & OBJECT_PHYS) && in kmemleak_scan()
1688 !(object->flags & OBJECT_NO_SCAN)) { in kmemleak_scan()
1689 unsigned long phys = object->pointer; in kmemleak_scan()
1692 PHYS_PFN(phys + object->size) > max_low_pfn) in kmemleak_scan()
1697 object->count = 0; in kmemleak_scan()
1699 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1701 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1709 /* per-cpu sections scanning */ in kmemleak_scan()
1720 unsigned long start_pfn = zone->zone_start_pfn; in kmemleak_scan()
1769 * Check for new or unreferenced objects modified since the previous scan and color them gray until the next scan. in kmemleak_scan()
1783 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1784 if (color_white(object) && (object->flags & OBJECT_ALLOCATED) in kmemleak_scan()
1786 /* color it gray temporarily */ in kmemleak_scan()
1787 object->count = object->min_count; in kmemleak_scan()
1788 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1790 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1795 * Re-scan the gray list for modified unreferenced objects. in kmemleak_scan()
1820 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1822 !(object->flags & OBJECT_REPORTED)) { in kmemleak_scan()
1823 object->flags |= OBJECT_REPORTED; in kmemleak_scan()
1830 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1924 if (n-- > 0) in kmemleak_seq_start()
1982 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_seq_show()
1983 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) in kmemleak_seq_show()
1985 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_seq_show()
2008 return -EINVAL; in dump_str_object_info()
2012 return -EINVAL; in dump_str_object_info()
2015 raw_spin_lock_irqsave(&object->lock, flags); in dump_str_object_info()
2017 raw_spin_unlock_irqrestore(&object->lock, flags); in dump_str_object_info()
2035 raw_spin_lock_irq(&object->lock); in kmemleak_clear()
2036 if ((object->flags & OBJECT_REPORTED) && in kmemleak_clear()
2039 raw_spin_unlock_irq(&object->lock); in kmemleak_clear()
2049 * File write operation to configure kmemleak at run-time. The following arguments are supported (see the usage example after this list):
2051 * off - disable kmemleak (irreversible)
2052 * stack=on - enable scanning of task stacks
2053 * stack=off - disable scanning of task stacks
2054 * scan=on - start the automatic memory scanning thread
2055 * scan=off - stop the automatic memory scanning thread
2056 * scan=... - set the automatic memory scanning period in seconds (0 to disable automatic scanning)
2058 * scan - trigger a memory scan
2059 * clear - mark all currently reported unreferenced kmemleak objects as grey to suppress reporting until the next scan
2062 * dump=... - dump information about the object found at the given address
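For example, from user space (the debugfs file is documented in kmemleak.rst):

	# echo scan > /sys/kernel/debug/kmemleak
	# cat /sys/kernel/debug/kmemleak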
2071 buf_size = min(size, (sizeof(buf) - 1)); in kmemleak_write()
2073 return -EFAULT; in kmemleak_write()
2089 ret = -EPERM; in kmemleak_write()
2125 ret = -EINVAL; in kmemleak_write()
2210 * Allow boot-time kmemleak disabling (enabled by default).
2215 return -EINVAL; in kmemleak_boot_config()
2223 return -EINVAL; in kmemleak_boot_config()
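For example, booting with kmemleak=off on the kernel command line disables the
checker irreversibly, while kmemleak=on enables it on kernels built with
scanning disabled by default (CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF).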
2250 create_object((unsigned long)_sdata, _edata - _sdata, in kmemleak_init()
2252 create_object((unsigned long)__bss_start, __bss_stop - __bss_start, in kmemleak_init()
2257 __end_ro_after_init - __start_ro_after_init, in kmemleak_init()
2275 * two clean-up threads but serialized by scan_mutex. in kmemleak_late_init()
2278 return -ENOMEM; in kmemleak_late_init()