xref: /linux/mm/kmemleak.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * mm/kmemleak.c
4   *
5   * Copyright (C) 2008 ARM Limited
6   * Written by Catalin Marinas <catalin.marinas@arm.com>
7   *
8   * For more information on the algorithm and kmemleak usage, please see
9   * Documentation/dev-tools/kmemleak.rst.
10   *
11   * Notes on locking
12   * ----------------
13   *
14   * The following locks and mutexes are used by kmemleak:
15   *
16   * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
17   *   del_state modifications and accesses to the object trees
18   *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
19   *   object_list is the main list holding the metadata (struct
20   *   kmemleak_object) for the allocated memory blocks. The object trees are
21   *   red black trees used to look-up metadata based on a pointer to the
22   *   corresponding memory block. The kmemleak_object structures are added to
23   *   the object_list and the object tree root in the create_object() function
24   *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
25   *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback.
26   * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
27   *   Accesses to the metadata (e.g. count) are protected by this lock. Note
28   *   that some members of this structure may be protected by other means
29   *   (atomic or kmemleak_lock). This lock is also held when scanning the
30   *   corresponding memory block to avoid the kernel freeing it via the
31   *   kmemleak_free() callback. This is less heavyweight than holding a global
32   *   lock like kmemleak_lock during scanning.
33   * - scan_mutex (mutex): ensures that only one thread may scan the memory for
34   *   unreferenced objects at a time. The gray_list contains the objects which
35   *   are already referenced or marked as false positives and need to be
36   *   scanned. This list is only modified during a scanning episode when the
37   *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
38   *   Note that the kmemleak_object.use_count is incremented when an object is
39   *   added to the gray_list and therefore cannot be freed. This mutex also
40   *   prevents multiple users of the "kmemleak" debugfs file together with
41   *   prevents multiple users of the "kmemleak" debugfs file, together with
42   *   modifications to the memory scanning parameters, including the scan_thread
43   *   pointer.
44   * Locks and mutexes are acquired/nested in the following order:
45   *
46   *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
47   *
48   * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
49   * regions.
50   *
51   * The kmemleak_object structures have a use_count incremented or decremented
52   * using the get_object()/put_object() functions. When the use_count becomes
53   * 0, this count can no longer be incremented and put_object() schedules the
54   * kmemleak_object freeing via an RCU callback. All calls to the get_object()
55   * function must be protected by rcu_read_lock() to avoid accessing a freed
56   * structure.
57   */
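
/*
 * Illustrative sketch (editorial addition, not part of the source): the
 * nesting order above mirrors the scan path in this file, roughly
 *
 *	mutex_lock(&scan_mutex);			// scan thread
 *	raw_spin_lock_irqsave(&object->lock, flags);	// scan_object()
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags);	// scan_block()
 *	raw_spin_lock_nested(&other_object->lock,
 *			     SINGLE_DEPTH_NESTING);	// pointer_update_refs()
 *
 * with the unlocks performed in reverse order.
 */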
58  
59  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
60  
61  #include <linux/init.h>
62  #include <linux/kernel.h>
63  #include <linux/list.h>
64  #include <linux/sched/signal.h>
65  #include <linux/sched/task.h>
66  #include <linux/sched/task_stack.h>
67  #include <linux/jiffies.h>
68  #include <linux/delay.h>
69  #include <linux/export.h>
70  #include <linux/kthread.h>
71  #include <linux/rbtree.h>
72  #include <linux/fs.h>
73  #include <linux/debugfs.h>
74  #include <linux/seq_file.h>
75  #include <linux/cpumask.h>
76  #include <linux/spinlock.h>
77  #include <linux/module.h>
78  #include <linux/mutex.h>
79  #include <linux/rcupdate.h>
80  #include <linux/stacktrace.h>
81  #include <linux/stackdepot.h>
82  #include <linux/cache.h>
83  #include <linux/percpu.h>
84  #include <linux/memblock.h>
85  #include <linux/pfn.h>
86  #include <linux/mmzone.h>
87  #include <linux/slab.h>
88  #include <linux/thread_info.h>
89  #include <linux/err.h>
90  #include <linux/uaccess.h>
91  #include <linux/string.h>
92  #include <linux/nodemask.h>
93  #include <linux/mm.h>
94  #include <linux/workqueue.h>
95  #include <linux/crc32.h>
96  
97  #include <asm/sections.h>
98  #include <asm/processor.h>
99  #include <linux/atomic.h>
100  
101  #include <linux/kasan.h>
102  #include <linux/kfence.h>
103  #include <linux/kmemleak.h>
104  #include <linux/memory_hotplug.h>
105  
106  /*
107   * Kmemleak configuration and common defines.
108   */
109  #define MAX_TRACE		16	/* stack trace length */
110  #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
111  #define SECS_FIRST_SCAN		60	/* delay before the first scan */
112  #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
113  #define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
114  
115  #define BYTES_PER_POINTER	sizeof(void *)
116  
117  /* scanning area inside a memory block */
118  struct kmemleak_scan_area {
119  	struct hlist_node node;
120  	unsigned long start;
121  	size_t size;
122  };
123  
124  #define KMEMLEAK_GREY	0
125  #define KMEMLEAK_BLACK	-1
126  
127  /*
128   * Structure holding the metadata for each allocated memory block.
129   * Modifications to such objects should be made while holding the
130   * object->lock. Insertions or deletions from object_list, gray_list or
131   * rb_node are already protected by the corresponding locks or mutex (see
132   * the notes on locking above). These objects are reference-counted
133   * (use_count) and freed using the RCU mechanism.
134   */
135  struct kmemleak_object {
136  	raw_spinlock_t lock;
137  	unsigned int flags;		/* object status flags */
138  	struct list_head object_list;
139  	struct list_head gray_list;
140  	struct rb_node rb_node;
141  	struct rcu_head rcu;		/* object_list lockless traversal */
142  	/* object usage count; object freed when use_count == 0 */
143  	atomic_t use_count;
144  	unsigned int del_state;		/* deletion state */
145  	unsigned long pointer;
146  	size_t size;
147  	/* pass surplus references to this pointer */
148  	unsigned long excess_ref;
149  	/* minimum number of pointers found before it is considered a leak */
150  	int min_count;
151  	/* the total number of pointers found pointing to this object */
152  	int count;
153  	/* checksum for detecting modified objects */
154  	u32 checksum;
155  	depot_stack_handle_t trace_handle;
156  	/* memory ranges to be scanned inside an object (empty for all) */
157  	struct hlist_head area_list;
158  	unsigned long jiffies;		/* creation timestamp */
159  	pid_t pid;			/* pid of the current task */
160  	char comm[TASK_COMM_LEN];	/* executable name */
161  };
162  
163  /* flag representing the memory block allocation status */
164  #define OBJECT_ALLOCATED	(1 << 0)
165  /* flag set after the first reporting of an unreferenced object */
166  #define OBJECT_REPORTED		(1 << 1)
167  /* flag set to not scan the object */
168  #define OBJECT_NO_SCAN		(1 << 2)
169  /* flag set to fully scan the object when scan_area allocation failed */
170  #define OBJECT_FULL_SCAN	(1 << 3)
171  /* flag set for object allocated with physical address */
172  #define OBJECT_PHYS		(1 << 4)
173  /* flag set for per-CPU pointers */
174  #define OBJECT_PERCPU		(1 << 5)
175  
176  /* set when __remove_object() called */
177  #define DELSTATE_REMOVED	(1 << 0)
178  /* set to temporarily prevent deletion from object_list */
179  #define DELSTATE_NO_DELETE	(1 << 1)
180  
181  #define HEX_PREFIX		"    "
182  /* number of bytes to print per line; must be 16 or 32 */
183  #define HEX_ROW_SIZE		16
184  /* number of bytes to print at a time (1, 2, 4, 8) */
185  #define HEX_GROUP_SIZE		1
186  /* include ASCII after the hex output */
187  #define HEX_ASCII		1
188  /* max number of lines to be printed */
189  #define HEX_MAX_LINES		2
190  
191  /* the list of all allocated objects */
192  static LIST_HEAD(object_list);
193  /* the list of gray-colored objects (see color_gray comment below) */
194  static LIST_HEAD(gray_list);
195  /* memory pool allocation */
196  static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
197  static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
198  static LIST_HEAD(mem_pool_free_list);
199  /* search tree for object boundaries */
200  static struct rb_root object_tree_root = RB_ROOT;
201  /* search tree for object (with OBJECT_PHYS flag) boundaries */
202  static struct rb_root object_phys_tree_root = RB_ROOT;
203  /* search tree for object (with OBJECT_PERCPU flag) boundaries */
204  static struct rb_root object_percpu_tree_root = RB_ROOT;
205  /* protecting the access to object_list and the object trees (object_tree_root, object_phys_tree_root, object_percpu_tree_root) */
206  static DEFINE_RAW_SPINLOCK(kmemleak_lock);
207  
208  /* allocation caches for kmemleak internal data */
209  static struct kmem_cache *object_cache;
210  static struct kmem_cache *scan_area_cache;
211  
212  /* set if tracing memory operations is enabled */
213  static int kmemleak_enabled = 1;
214  /* same as above but only for the kmemleak_free() callback */
215  static int kmemleak_free_enabled = 1;
216  /* set in the late_initcall if there were no errors */
217  static int kmemleak_late_initialized;
218  /* set if a kmemleak warning was issued */
219  static int kmemleak_warning;
220  /* set if a fatal kmemleak error has occurred */
221  static int kmemleak_error;
222  
223  /* minimum and maximum address that may be valid pointers */
224  static unsigned long min_addr = ULONG_MAX;
225  static unsigned long max_addr;
226  
227  /* minimum and maximum address that may be valid per-CPU pointers */
228  static unsigned long min_percpu_addr = ULONG_MAX;
229  static unsigned long max_percpu_addr;
230  
231  static struct task_struct *scan_thread;
232  /* used to avoid reporting of recently allocated objects */
233  static unsigned long jiffies_min_age;
234  static unsigned long jiffies_last_scan;
235  /* delay between automatic memory scannings */
236  static unsigned long jiffies_scan_wait;
237  /* enables or disables the task stacks scanning */
238  static int kmemleak_stack_scan = 1;
239  /* protects the memory scanning, parameters and debug/kmemleak file access */
240  static DEFINE_MUTEX(scan_mutex);
241  /* setting kmemleak=on will set this var, skipping the disable */
242  static int kmemleak_skip_disable;
243  /* set if there are leaks that can be reported */
244  static bool kmemleak_found_leaks;
245  
246  static bool kmemleak_verbose;
247  module_param_named(verbose, kmemleak_verbose, bool, 0600);
248  
249  static void kmemleak_disable(void);
250  
251  /*
252   * Print a warning and dump the stack trace.
253   */
254  #define kmemleak_warn(x...)	do {		\
255  	pr_warn(x);				\
256  	dump_stack();				\
257  	kmemleak_warning = 1;			\
258  } while (0)
259  
260  /*
261   * Macro invoked when a serious kmemleak condition has occurred and cannot be
262   * recovered from. Kmemleak will be disabled and further allocation/freeing
263   * tracing will no longer be available.
264   */
265  #define kmemleak_stop(x...)	do {	\
266  	kmemleak_warn(x);		\
267  	kmemleak_disable();		\
268  } while (0)
269  
270  #define warn_or_seq_printf(seq, fmt, ...)	do {	\
271  	if (seq)					\
272  		seq_printf(seq, fmt, ##__VA_ARGS__);	\
273  	else						\
274  		pr_warn(fmt, ##__VA_ARGS__);		\
275  } while (0)
276  
277  static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
278  				 int rowsize, int groupsize, const void *buf,
279  				 size_t len, bool ascii)
280  {
281  	if (seq)
282  		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
283  			     buf, len, ascii);
284  	else
285  		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
286  			       rowsize, groupsize, buf, len, ascii);
287  }
288  
289  /*
290   * Printing of the object's hex dump to the seq file. The number of lines to be
291   * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
292   * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
293   * with the object->lock held.
294   */
295  static void hex_dump_object(struct seq_file *seq,
296  			    struct kmemleak_object *object)
297  {
298  	const u8 *ptr = (const u8 *)object->pointer;
299  	size_t len;
300  
301  	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
302  		return;
303  
304  	if (object->flags & OBJECT_PERCPU)
305  		ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);
306  
307  	/* limit the number of lines to HEX_MAX_LINES */
308  	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
309  
310  	if (object->flags & OBJECT_PERCPU)
311  		warn_or_seq_printf(seq, "  hex dump (first %zu bytes on cpu %d):\n",
312  				   len, raw_smp_processor_id());
313  	else
314  		warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
315  	kasan_disable_current();
316  	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
317  			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
318  	kasan_enable_current();
319  }
320  
321  /*
322   * Object colors, encoded with count and min_count:
323   * - white - orphan object, not enough references to it (count < min_count)
324   * - gray  - not orphan: either marked as a false positive (min_count == 0) or
325   *		with sufficient references to it (count >= min_count)
326   * - black - ignore, it doesn't contain references (e.g. text section)
327   *		(min_count == -1). No function defined for this color.
328   * Newly created objects don't have any color assigned (object->count == -1)
329   * before the next memory scan when they become white.
330   */
331  static bool color_white(const struct kmemleak_object *object)
332  {
333  	return object->count != KMEMLEAK_BLACK &&
334  		object->count < object->min_count;
335  }
336  
337  static bool color_gray(const struct kmemleak_object *object)
338  {
339  	return object->min_count != KMEMLEAK_BLACK &&
340  		object->count >= object->min_count;
341  }
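
/*
 * Worked example (editorial addition): a typical slab object is registered
 * with min_count == 1 and starts with count == 0, i.e. white. Each pointer
 * found during a scan increments count, so a single reference is enough to
 * turn it gray. kmemleak_not_leak() repaints it with min_count == 0
 * (always gray), while kmemleak_ignore() uses KMEMLEAK_BLACK (never
 * scanned or reported).
 */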
342  
343  /*
344   * Objects are considered unreferenced only if their color is white, they have
345   * not been deleted and have a minimum age to avoid false positives caused by
346   * pointers temporarily stored in CPU registers.
347   */
348  static bool unreferenced_object(struct kmemleak_object *object)
349  {
350  	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
351  		time_before_eq(object->jiffies + jiffies_min_age,
352  			       jiffies_last_scan);
353  }
354  
355  /*
356   * Printing of the unreferenced objects information to the seq file. The
357   * print_unreferenced function must be called with the object->lock held.
358   */
359  static void print_unreferenced(struct seq_file *seq,
360  			       struct kmemleak_object *object)
361  {
362  	int i;
363  	unsigned long *entries;
364  	unsigned int nr_entries;
365  
366  	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
367  	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
368  			  object->pointer, object->size);
369  	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
370  			   object->comm, object->pid, object->jiffies);
371  	hex_dump_object(seq, object);
372  	warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);
373  
374  	for (i = 0; i < nr_entries; i++) {
375  		void *ptr = (void *)entries[i];
376  		warn_or_seq_printf(seq, "    %pS\n", ptr);
377  	}
378  }
379  
380  /*
381   * Print the kmemleak_object information. This function is used mainly for
382   * debugging special cases of kmemleak operations. It must be called with
383   * the object->lock held.
384   */
385  static void dump_object_info(struct kmemleak_object *object)
386  {
387  	pr_notice("Object 0x%08lx (size %zu):\n",
388  			object->pointer, object->size);
389  	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
390  			object->comm, object->pid, object->jiffies);
391  	pr_notice("  min_count = %d\n", object->min_count);
392  	pr_notice("  count = %d\n", object->count);
393  	pr_notice("  flags = 0x%x\n", object->flags);
394  	pr_notice("  checksum = %u\n", object->checksum);
395  	pr_notice("  backtrace:\n");
396  	if (object->trace_handle)
397  		stack_depot_print(object->trace_handle);
398  }
399  
400  static struct rb_root *object_tree(unsigned long objflags)
401  {
402  	if (objflags & OBJECT_PHYS)
403  		return &object_phys_tree_root;
404  	if (objflags & OBJECT_PERCPU)
405  		return &object_percpu_tree_root;
406  	return &object_tree_root;
407  }
408  
409  /*
410   * Look-up a memory block metadata (kmemleak_object) in the object search
411   * tree based on a pointer value. If alias is 0, only values pointing to the
412   * beginning of the memory block are allowed. The kmemleak_lock must be held
413   * when calling this function.
414   */
415  static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
416  					       unsigned int objflags)
417  {
418  	struct rb_node *rb = object_tree(objflags)->rb_node;
419  	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
420  
421  	while (rb) {
422  		struct kmemleak_object *object;
423  		unsigned long untagged_objp;
424  
425  		object = rb_entry(rb, struct kmemleak_object, rb_node);
426  		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
427  
428  		if (untagged_ptr < untagged_objp)
429  			rb = object->rb_node.rb_left;
430  		else if (untagged_objp + object->size <= untagged_ptr)
431  			rb = object->rb_node.rb_right;
432  		else if (untagged_objp == untagged_ptr || alias)
433  			return object;
434  		else {
435  			kmemleak_warn("Found object by alias at 0x%08lx\n",
436  				      ptr);
437  			dump_object_info(object);
438  			break;
439  		}
440  	}
441  	return NULL;
442  }
443  
444  /* Look-up a kmemleak object which was allocated with a virtual address. */
445  static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
446  {
447  	return __lookup_object(ptr, alias, 0);
448  }
449  
450  /*
451   * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
452   * that once an object's use_count has reached 0, the RCU freeing has already
453   * been registered and the object should no longer be used. This function must be
454   * called under the protection of rcu_read_lock().
455   */
456  static int get_object(struct kmemleak_object *object)
457  {
458  	return atomic_inc_not_zero(&object->use_count);
459  }
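
/*
 * Typical calling pattern (editorial sketch, mirroring
 * __find_and_get_object() below):
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	// under kmemleak_lock
 *	if (object && !get_object(object))
 *		object = NULL;			// object already being freed
 *	rcu_read_unlock();
 */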
460  
461  /*
462   * Memory pool allocation and freeing. kmemleak_lock must not be held.
463   */
464  static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
465  {
466  	unsigned long flags;
467  	struct kmemleak_object *object;
468  
469  	/* try the slab allocator first */
470  	if (object_cache) {
471  		object = kmem_cache_alloc_noprof(object_cache,
472  						 gfp_nested_mask(gfp));
473  		if (object)
474  			return object;
475  	}
476  
477  	/* slab allocation failed, try the memory pool */
478  	raw_spin_lock_irqsave(&kmemleak_lock, flags);
479  	object = list_first_entry_or_null(&mem_pool_free_list,
480  					  typeof(*object), object_list);
481  	if (object)
482  		list_del(&object->object_list);
483  	else if (mem_pool_free_count)
484  		object = &mem_pool[--mem_pool_free_count];
485  	else
486  		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
487  	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
488  
489  	return object;
490  }
491  
492  /*
493   * Return the object to either the slab allocator or the memory pool.
494   */
495  static void mem_pool_free(struct kmemleak_object *object)
496  {
497  	unsigned long flags;
498  
499  	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
500  		kmem_cache_free(object_cache, object);
501  		return;
502  	}
503  
504  	/* add the object to the memory pool free list */
505  	raw_spin_lock_irqsave(&kmemleak_lock, flags);
506  	list_add(&object->object_list, &mem_pool_free_list);
507  	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
508  }
509  
510  /*
511   * RCU callback to free a kmemleak_object.
512   */
513  static void free_object_rcu(struct rcu_head *rcu)
514  {
515  	struct hlist_node *tmp;
516  	struct kmemleak_scan_area *area;
517  	struct kmemleak_object *object =
518  		container_of(rcu, struct kmemleak_object, rcu);
519  
520  	/*
521  	 * Once use_count is 0 (guaranteed by put_object), there is no other
522  	 * code accessing this object, hence no need for locking.
523  	 */
524  	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
525  		hlist_del(&area->node);
526  		kmem_cache_free(scan_area_cache, area);
527  	}
528  	mem_pool_free(object);
529  }
530  
531  /*
532   * Decrement the object use_count. Once the count is 0, free the object using
533   * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
534   * delete_object() path, the delayed RCU freeing ensures that there is no
535   * recursive call to the kernel allocator. Lock-less RCU object_list traversal
536   * is also possible.
537   */
538  static void put_object(struct kmemleak_object *object)
539  {
540  	if (!atomic_dec_and_test(&object->use_count))
541  		return;
542  
543  	/* should only get here after delete_object was called */
544  	WARN_ON(object->flags & OBJECT_ALLOCATED);
545  
546  	/*
547  	 * It may be too early for the RCU callbacks, however, there is no
548  	 * concurrent object_list traversal when !object_cache and all objects
549  	 * came from the memory pool. Free the object directly.
550  	 */
551  	if (object_cache)
552  		call_rcu(&object->rcu, free_object_rcu);
553  	else
554  		free_object_rcu(&object->rcu);
555  }
556  
557  /*
558   * Look up an object in the object search tree and increase its use_count.
559   */
560  static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
561  						     unsigned int objflags)
562  {
563  	unsigned long flags;
564  	struct kmemleak_object *object;
565  
566  	rcu_read_lock();
567  	raw_spin_lock_irqsave(&kmemleak_lock, flags);
568  	object = __lookup_object(ptr, alias, objflags);
569  	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
570  
571  	/* check whether the object is still available */
572  	if (object && !get_object(object))
573  		object = NULL;
574  	rcu_read_unlock();
575  
576  	return object;
577  }
578  
579  /* Look up and get an object which was allocated with a virtual address. */
580  static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
581  {
582  	return __find_and_get_object(ptr, alias, 0);
583  }
584  
585  /*
586   * Remove an object from its object tree and object_list. Must be called with
587   * the kmemleak_lock held _if_ kmemleak is still enabled.
588   */
589  static void __remove_object(struct kmemleak_object *object)
590  {
591  	rb_erase(&object->rb_node, object_tree(object->flags));
592  	if (!(object->del_state & DELSTATE_NO_DELETE))
593  		list_del_rcu(&object->object_list);
594  	object->del_state |= DELSTATE_REMOVED;
595  }
596  
597  static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
598  							int alias,
599  							unsigned int objflags)
600  {
601  	struct kmemleak_object *object;
602  
603  	object = __lookup_object(ptr, alias, objflags);
604  	if (object)
605  		__remove_object(object);
606  
607  	return object;
608  }
609  
610  /*
611   * Look up an object in the object search tree and remove it from both object
612   * tree root and object_list. The returned object's use_count should be at
613   * least 1, as initially set by create_object().
614   */
615  static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
616  						      unsigned int objflags)
617  {
618  	unsigned long flags;
619  	struct kmemleak_object *object;
620  
621  	raw_spin_lock_irqsave(&kmemleak_lock, flags);
622  	object = __find_and_remove_object(ptr, alias, objflags);
623  	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
624  
625  	return object;
626  }
627  
628  static noinline depot_stack_handle_t set_track_prepare(void)
629  {
630  	depot_stack_handle_t trace_handle;
631  	unsigned long entries[MAX_TRACE];
632  	unsigned int nr_entries;
633  
634  	/*
635  	 * Use object_cache to determine whether kmemleak_init() has
636  	 * been invoked. stack_depot_early_init() is called before
637  	 * kmemleak_init() in mm_core_init().
638  	 */
639  	if (!object_cache)
640  		return 0;
641  	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
642  	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
643  
644  	return trace_handle;
645  }
646  
647  static struct kmemleak_object *__alloc_object(gfp_t gfp)
648  {
649  	struct kmemleak_object *object;
650  
651  	object = mem_pool_alloc(gfp);
652  	if (!object) {
653  		pr_warn("Cannot allocate a kmemleak_object structure\n");
654  		kmemleak_disable();
655  		return NULL;
656  	}
657  
658  	INIT_LIST_HEAD(&object->object_list);
659  	INIT_LIST_HEAD(&object->gray_list);
660  	INIT_HLIST_HEAD(&object->area_list);
661  	raw_spin_lock_init(&object->lock);
662  	atomic_set(&object->use_count, 1);
663  	object->excess_ref = 0;
664  	object->count = 0;			/* white color initially */
665  	object->checksum = 0;
666  	object->del_state = 0;
667  
668  	/* task information */
669  	if (in_hardirq()) {
670  		object->pid = 0;
671  		strscpy(object->comm, "hardirq");
672  	} else if (in_serving_softirq()) {
673  		object->pid = 0;
674  		strscpy(object->comm, "softirq");
675  	} else {
676  		object->pid = current->pid;
677  		/*
678  		 * There is a small chance of a race with set_task_comm(),
679  		 * however using get_task_comm() here may cause locking
680  		 * dependency issues with current->alloc_lock. In the worst
681  		 * case, the command line is not correct.
682  		 */
683  		strscpy(object->comm, current->comm);
684  	}
685  
686  	/* kernel backtrace */
687  	object->trace_handle = set_track_prepare();
688  
689  	return object;
690  }
691  
692  static int __link_object(struct kmemleak_object *object, unsigned long ptr,
693  			 size_t size, int min_count, unsigned int objflags)
694  {
695  
696  	struct kmemleak_object *parent;
697  	struct rb_node **link, *rb_parent;
698  	unsigned long untagged_ptr;
699  	unsigned long untagged_objp;
700  
701  	object->flags = OBJECT_ALLOCATED | objflags;
702  	object->pointer = ptr;
703  	object->size = kfence_ksize((void *)ptr) ?: size;
704  	object->min_count = min_count;
705  	object->jiffies = jiffies;
706  
707  	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
708  	/*
709  	 * Only update min_addr and max_addr for objects storing a virtual
710  	 * address, and min_percpu_addr/max_percpu_addr for per-CPU
711  	 * objects.
712  	 */
713  	if (objflags & OBJECT_PERCPU) {
714  		min_percpu_addr = min(min_percpu_addr, untagged_ptr);
715  		max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
716  	} else if (!(objflags & OBJECT_PHYS)) {
717  		min_addr = min(min_addr, untagged_ptr);
718  		max_addr = max(max_addr, untagged_ptr + size);
719  	}
720  	link = &object_tree(objflags)->rb_node;
721  	rb_parent = NULL;
722  	while (*link) {
723  		rb_parent = *link;
724  		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
725  		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
726  		if (untagged_ptr + size <= untagged_objp)
727  			link = &parent->rb_node.rb_left;
728  		else if (untagged_objp + parent->size <= untagged_ptr)
729  			link = &parent->rb_node.rb_right;
730  		else {
731  			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
732  				      ptr);
733  			/*
734  			 * No need for parent->lock here since "parent" cannot
735  			 * be freed while the kmemleak_lock is held.
736  			 */
737  			dump_object_info(parent);
738  			return -EEXIST;
739  		}
740  	}
741  	rb_link_node(&object->rb_node, rb_parent, link);
742  	rb_insert_color(&object->rb_node, object_tree(objflags));
743  	list_add_tail_rcu(&object->object_list, &object_list);
744  
745  	return 0;
746  }
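
/*
 * Editorial note: the tree walk above orders objects by the interval
 * [untagged_objp, untagged_objp + size). A new interval may only be
 * inserted if it is fully disjoint from each existing one:
 *
 *	untagged_ptr + size <= untagged_objp		-> descend left
 *	untagged_objp + parent->size <= untagged_ptr	-> descend right
 *	anything else					-> overlap, kmemleak_stop()
 */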
747  
748  /*
749   * Create the metadata (struct kmemleak_object) corresponding to an allocated
750   * memory block and add it to the object_list and object tree.
751   */
752  static void __create_object(unsigned long ptr, size_t size,
753  				int min_count, gfp_t gfp, unsigned int objflags)
754  {
755  	struct kmemleak_object *object;
756  	unsigned long flags;
757  	int ret;
758  
759  	object = __alloc_object(gfp);
760  	if (!object)
761  		return;
762  
763  	raw_spin_lock_irqsave(&kmemleak_lock, flags);
764  	ret = __link_object(object, ptr, size, min_count, objflags);
765  	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
766  	if (ret)
767  		mem_pool_free(object);
768  }
769  
770  /* Create a kmemleak object for a block allocated with a virtual address. */
771  static void create_object(unsigned long ptr, size_t size,
772  			  int min_count, gfp_t gfp)
773  {
774  	__create_object(ptr, size, min_count, gfp, 0);
775  }
776  
777  /* Create a kmemleak object for a block allocated with a physical address. */
778  static void create_object_phys(unsigned long ptr, size_t size,
779  			       int min_count, gfp_t gfp)
780  {
781  	__create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
782  }
783  
784  /* Create kmemleak object corresponding to a per-CPU allocation. */
785  static void create_object_percpu(unsigned long ptr, size_t size,
786  				 int min_count, gfp_t gfp)
787  {
788  	__create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
789  }
790  
791  /*
792   * Mark the object as not allocated and schedule RCU freeing via put_object().
793   */
794  static void __delete_object(struct kmemleak_object *object)
795  {
796  	unsigned long flags;
797  
798  	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
799  	WARN_ON(atomic_read(&object->use_count) < 1);
800  
801  	/*
802  	 * Locking here also ensures that the corresponding memory block
803  	 * cannot be freed when it is being scanned.
804  	 */
805  	raw_spin_lock_irqsave(&object->lock, flags);
806  	object->flags &= ~OBJECT_ALLOCATED;
807  	raw_spin_unlock_irqrestore(&object->lock, flags);
808  	put_object(object);
809  }
810  
811  /*
812   * Look up the metadata (struct kmemleak_object) corresponding to ptr and
813   * delete it.
814   */
815  static void delete_object_full(unsigned long ptr, unsigned int objflags)
816  {
817  	struct kmemleak_object *object;
818  
819  	object = find_and_remove_object(ptr, 0, objflags);
820  	if (!object) {
821  #ifdef DEBUG
822  		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
823  			      ptr);
824  #endif
825  		return;
826  	}
827  	__delete_object(object);
828  }
829  
830  /*
831   * Look up the metadata (struct kmemleak_object) corresponding to ptr and
832   * delete it. If the memory block is partially freed, the function may create
833   * additional metadata for the remaining parts of the block.
834   */
835  static void delete_object_part(unsigned long ptr, size_t size,
836  			       unsigned int objflags)
837  {
838  	struct kmemleak_object *object, *object_l, *object_r;
839  	unsigned long start, end, flags;
840  
841  	object_l = __alloc_object(GFP_KERNEL);
842  	if (!object_l)
843  		return;
844  
845  	object_r = __alloc_object(GFP_KERNEL);
846  	if (!object_r)
847  		goto out;
848  
849  	raw_spin_lock_irqsave(&kmemleak_lock, flags);
850  	object = __find_and_remove_object(ptr, 1, objflags);
851  	if (!object) {
852  #ifdef DEBUG
853  		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
854  			      ptr, size);
855  #endif
856  		goto unlock;
857  	}
858  
859  	/*
860  	 * Create one or two objects that may result from the memory block
861  	 * split. Note that partial freeing is only done by free_bootmem() and
862  	 * this happens before kmemleak_init() is called.
863  	 */
864  	start = object->pointer;
865  	end = object->pointer + object->size;
866  	if ((ptr > start) &&
867  	    !__link_object(object_l, start, ptr - start,
868  			   object->min_count, objflags))
869  		object_l = NULL;
870  	if ((ptr + size < end) &&
871  	    !__link_object(object_r, ptr + size, end - ptr - size,
872  			   object->min_count, objflags))
873  		object_r = NULL;
874  
875  unlock:
876  	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
877  	if (object)
878  		__delete_object(object);
879  
880  out:
881  	if (object_l)
882  		mem_pool_free(object_l);
883  	if (object_r)
884  		mem_pool_free(object_r);
885  }
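
/*
 * Editorial sketch of the split above: freeing [ptr, ptr + size) from an
 * object spanning [start, end) leaves up to two remainders:
 *
 *	start           ptr        ptr+size            end
 *	  |- object_l -->|- freed ->|---- object_r ---->|
 *
 * object_l is linked only if ptr > start and object_r only if
 * ptr + size < end; any pre-allocated object left unused is returned to
 * the pool via mem_pool_free().
 */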
886  
887  static void __paint_it(struct kmemleak_object *object, int color)
888  {
889  	object->min_count = color;
890  	if (color == KMEMLEAK_BLACK)
891  		object->flags |= OBJECT_NO_SCAN;
892  }
893  
894  static void paint_it(struct kmemleak_object *object, int color)
895  {
896  	unsigned long flags;
897  
898  	raw_spin_lock_irqsave(&object->lock, flags);
899  	__paint_it(object, color);
900  	raw_spin_unlock_irqrestore(&object->lock, flags);
901  }
902  
903  static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
904  {
905  	struct kmemleak_object *object;
906  
907  	object = __find_and_get_object(ptr, 0, objflags);
908  	if (!object) {
909  		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
910  			      ptr,
911  			      (color == KMEMLEAK_GREY) ? "Grey" :
912  			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
913  		return;
914  	}
915  	paint_it(object, color);
916  	put_object(object);
917  }
918  
919  /*
920   * Mark an object permanently as gray-colored so that it can no longer be
921   * reported as a leak. This is used in general to mark a false positive.
922   */
923  static void make_gray_object(unsigned long ptr)
924  {
925  	paint_ptr(ptr, KMEMLEAK_GREY, 0);
926  }
927  
928  /*
929   * Mark the object as black-colored so that it is ignored from scans and
930   * reporting.
931   */
932  static void make_black_object(unsigned long ptr, unsigned int objflags)
933  {
934  	paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
935  }
936  
937  /*
938   * Reset the checksum of an object. The immediate effect is that it will not
939   * be reported as a leak during the next scan until its checksum is updated.
940   */
941  static void reset_checksum(unsigned long ptr)
942  {
943  	unsigned long flags;
944  	struct kmemleak_object *object;
945  
946  	object = find_and_get_object(ptr, 0);
947  	if (!object) {
948  		kmemleak_warn("Not resetting the checksum of an unknown object at 0x%08lx\n",
949  			      ptr);
950  		return;
951  	}
952  
953  	raw_spin_lock_irqsave(&object->lock, flags);
954  	object->checksum = 0;
955  	raw_spin_unlock_irqrestore(&object->lock, flags);
956  	put_object(object);
957  }
958  
959  /*
960   * Add a scanning area to the object. If at least one such area is added,
961   * kmemleak will only scan these ranges rather than the whole memory block.
962   */
963  static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
964  {
965  	unsigned long flags;
966  	struct kmemleak_object *object;
967  	struct kmemleak_scan_area *area = NULL;
968  	unsigned long untagged_ptr;
969  	unsigned long untagged_objp;
970  
971  	object = find_and_get_object(ptr, 1);
972  	if (!object) {
973  		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
974  			      ptr);
975  		return;
976  	}
977  
978  	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
979  	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
980  
981  	if (scan_area_cache)
982  		area = kmem_cache_alloc_noprof(scan_area_cache,
983  					       gfp_nested_mask(gfp));
984  
985  	raw_spin_lock_irqsave(&object->lock, flags);
986  	if (!area) {
987  		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
988  		/* mark the object for full scan to avoid false positives */
989  		object->flags |= OBJECT_FULL_SCAN;
990  		goto out_unlock;
991  	}
992  	if (size == SIZE_MAX) {
993  		size = untagged_objp + object->size - untagged_ptr;
994  	} else if (untagged_ptr + size > untagged_objp + object->size) {
995  		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
996  		dump_object_info(object);
997  		kmem_cache_free(scan_area_cache, area);
998  		goto out_unlock;
999  	}
1000  
1001  	INIT_HLIST_NODE(&area->node);
1002  	area->start = ptr;
1003  	area->size = size;
1004  
1005  	hlist_add_head(&area->node, &object->area_list);
1006  out_unlock:
1007  	raw_spin_unlock_irqrestore(&object->lock, flags);
1008  	put_object(object);
1009  }
1010  
1011  /*
1012   * Any surplus references (object already gray) to 'ptr' are passed to
1013   * 'excess_ref'. This is used in the vmalloc() case where a pointer to
1014   * vm_struct may be used as an alternative reference to the vmalloc'ed object
1015   * (see free_thread_stack()).
1016   */
1017  static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
1018  {
1019  	unsigned long flags;
1020  	struct kmemleak_object *object;
1021  
1022  	object = find_and_get_object(ptr, 0);
1023  	if (!object) {
1024  		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
1025  			      ptr);
1026  		return;
1027  	}
1028  
1029  	raw_spin_lock_irqsave(&object->lock, flags);
1030  	object->excess_ref = excess_ref;
1031  	raw_spin_unlock_irqrestore(&object->lock, flags);
1032  	put_object(object);
1033  }
1034  
1035  /*
1036   * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
1037   * pointer. Such an object will not be scanned by kmemleak, but references to it
1038   * are searched.
1039   */
1040  static void object_no_scan(unsigned long ptr)
1041  {
1042  	unsigned long flags;
1043  	struct kmemleak_object *object;
1044  
1045  	object = find_and_get_object(ptr, 0);
1046  	if (!object) {
1047  		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
1048  		return;
1049  	}
1050  
1051  	raw_spin_lock_irqsave(&object->lock, flags);
1052  	object->flags |= OBJECT_NO_SCAN;
1053  	raw_spin_unlock_irqrestore(&object->lock, flags);
1054  	put_object(object);
1055  }
1056  
1057  /**
1058   * kmemleak_alloc - register a newly allocated object
1059   * @ptr:	pointer to beginning of the object
1060   * @size:	size of the object
1061   * @min_count:	minimum number of references to this object. If during memory
1062   *		scanning a number of references less than @min_count is found,
1063   *		the object is reported as a memory leak. If @min_count is 0,
1064   *		the object is never reported as a leak. If @min_count is -1,
1065   *		the object is ignored (not scanned and not reported as a leak)
1066   * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1067   *
1068   * This function is called from the kernel allocators when a new object
1069   * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
1070   */
1071  void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
1072  			  gfp_t gfp)
1073  {
1074  	pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);
1075  
1076  	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1077  		create_object((unsigned long)ptr, size, min_count, gfp);
1078  }
1079  EXPORT_SYMBOL_GPL(kmemleak_alloc);
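
/*
 * Hypothetical usage sketch (editorial addition; my_pool_alloc() is a
 * made-up allocator, not a kernel API): code handing out memory that the
 * core allocators do not track would register it like this:
 *
 *	void *p = my_pool_alloc(size);
 *	if (p)
 *		kmemleak_alloc(p, size, 1, GFP_KERNEL);	// report if unreferenced
 */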
1080  
1081  /**
1082   * kmemleak_alloc_percpu - register a newly allocated __percpu object
1083   * @ptr:	__percpu pointer to beginning of the object
1084   * @size:	size of the object
1085   * @gfp:	flags used for kmemleak internal memory allocations
1086   *
1087   * This function is called from the kernel percpu allocator when a new object
1088   * (memory block) is allocated (alloc_percpu).
1089   */
1090  void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
1091  				 gfp_t gfp)
1092  {
1093  	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
1094  
1095  	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
1096  		create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
1097  }
1098  EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
1099  
1100  /**
1101   * kmemleak_vmalloc - register a newly vmalloc'ed object
1102   * @area:	pointer to vm_struct
1103   * @size:	size of the object
1104   * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
1105   *
1106   * This function is called from the vmalloc() kernel allocator when a new
1107   * object (memory block) is allocated.
1108   */
1109  void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
1110  {
1111  	pr_debug("%s(0x%px, %zu)\n", __func__, area, size);
1112  
1113  	/*
1114  	 * A min_count = 2 is needed because vm_struct contains a reference to
1115  	 * the virtual address of the vmalloc'ed block.
1116  	 */
1117  	if (kmemleak_enabled) {
1118  		create_object((unsigned long)area->addr, size, 2, gfp);
1119  		object_set_excess_ref((unsigned long)area,
1120  				      (unsigned long)area->addr);
1121  	}
1122  }
1123  EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
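
/*
 * Editorial note on the min_count == 2 above: a live vmalloc'ed block is
 * normally referenced both by the caller's pointer and by vm_struct->addr,
 * so requiring two references stops the vm_struct's own copy from counting
 * as proof of liveness on its own. The excess_ref link set here forwards
 * surplus references to the vm_struct on to area->addr, so a surviving
 * pointer to the vm_struct alone (as in free_thread_stack()) still keeps
 * the block from being reported.
 */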
1124  
1125  /**
1126   * kmemleak_free - unregister a previously registered object
1127   * @ptr:	pointer to beginning of the object
1128   *
1129   * This function is called from the kernel allocators when an object (memory
1130   * block) is freed (kmem_cache_free, kfree, vfree etc.).
1131   */
1132  void __ref kmemleak_free(const void *ptr)
1133  {
1134  	pr_debug("%s(0x%px)\n", __func__, ptr);
1135  
1136  	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1137  		delete_object_full((unsigned long)ptr, 0);
1138  }
1139  EXPORT_SYMBOL_GPL(kmemleak_free);
1140  
1141  /**
1142   * kmemleak_free_part - partially unregister a previously registered object
1143   * @ptr:	pointer to the beginning or inside the object. This also
1144   *		represents the start of the range to be freed
1145   * @size:	size to be unregistered
1146   *
1147   * This function is called when only a part of a memory block is freed
1148   * (usually from the bootmem allocator).
1149   */
1150  void __ref kmemleak_free_part(const void *ptr, size_t size)
1151  {
1152  	pr_debug("%s(0x%px)\n", __func__, ptr);
1153  
1154  	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1155  		delete_object_part((unsigned long)ptr, size, 0);
1156  }
1157  EXPORT_SYMBOL_GPL(kmemleak_free_part);
1158  
1159  /**
1160   * kmemleak_free_percpu - unregister a previously registered __percpu object
1161   * @ptr:	__percpu pointer to beginning of the object
1162   *
1163   * This function is called from the kernel percpu allocator when an object
1164   * (memory block) is freed (free_percpu).
1165   */
1166  void __ref kmemleak_free_percpu(const void __percpu *ptr)
1167  {
1168  	pr_debug("%s(0x%px)\n", __func__, ptr);
1169  
1170  	if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
1171  		delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
1172  }
1173  EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1174  
1175  /**
1176   * kmemleak_update_trace - update object allocation stack trace
1177   * @ptr:	pointer to beginning of the object
1178   *
1179   * Override the object allocation stack trace for cases where the actual
1180   * allocation place is not always useful.
1181   */
1182  void __ref kmemleak_update_trace(const void *ptr)
1183  {
1184  	struct kmemleak_object *object;
1185  	depot_stack_handle_t trace_handle;
1186  	unsigned long flags;
1187  
1188  	pr_debug("%s(0x%px)\n", __func__, ptr);
1189  
1190  	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1191  		return;
1192  
1193  	object = find_and_get_object((unsigned long)ptr, 1);
1194  	if (!object) {
1195  #ifdef DEBUG
1196  		kmemleak_warn("Updating stack trace for unknown object at %p\n",
1197  			      ptr);
1198  #endif
1199  		return;
1200  	}
1201  
1202  	trace_handle = set_track_prepare();
1203  	raw_spin_lock_irqsave(&object->lock, flags);
1204  	object->trace_handle = trace_handle;
1205  	raw_spin_unlock_irqrestore(&object->lock, flags);
1206  
1207  	put_object(object);
1208  }
1209  EXPORT_SYMBOL(kmemleak_update_trace);
1210  
1211  /**
1212   * kmemleak_not_leak - mark an allocated object as false positive
1213   * @ptr:	pointer to beginning of the object
1214   *
1215   * Calling this function on an object will cause the memory block to no longer
1216   * be reported as a leak and always be scanned.
1217   */
1218  void __ref kmemleak_not_leak(const void *ptr)
1219  {
1220  	pr_debug("%s(0x%px)\n", __func__, ptr);
1221  
1222  	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1223  		make_gray_object((unsigned long)ptr);
1224  }
1225  EXPORT_SYMBOL(kmemleak_not_leak);
1226  
1227  /**
1228   * kmemleak_transient_leak - mark an allocated object as transient false positive
1229   * @ptr:	pointer to beginning of the object
1230   *
1231   * Calling this function on an object will cause the memory block to not be
1232   * reported as a leak temporarily. This may happen, for example, if the object
1233   * is part of a singly linked list and the ->next reference to it is changed.
1234   */
1235  void __ref kmemleak_transient_leak(const void *ptr)
1236  {
1237  	pr_debug("%s(0x%px)\n", __func__, ptr);
1238  
1239  	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1240  		reset_checksum((unsigned long)ptr);
1241  }
1242  EXPORT_SYMBOL(kmemleak_transient_leak);
1243  
1244  /**
1245   * kmemleak_ignore - ignore an allocated object
1246   * @ptr:	pointer to beginning of the object
1247   *
1248   * Calling this function on an object will cause the memory block to be
1249   * ignored (not scanned and not reported as a leak). This is usually done when
1250   * it is known that the corresponding block is not a leak and does not contain
1251   * any references to other allocated memory blocks.
1252   */
1253  void __ref kmemleak_ignore(const void *ptr)
1254  {
1255  	pr_debug("%s(0x%px)\n", __func__, ptr);
1256  
1257  	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1258  		make_black_object((unsigned long)ptr, 0);
1259  }
1260  EXPORT_SYMBOL(kmemleak_ignore);
1261  
1262  /**
1263   * kmemleak_scan_area - limit the range to be scanned in an allocated object
1264   * @ptr:	pointer to beginning or inside the object. This also
1265   *		represents the start of the scan area
1266   * @size:	size of the scan area
1267   * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1268   *
1269   * This function is used when it is known that only certain parts of an object
1270   * contain references to other objects. Kmemleak will only scan these areas
1271   * reducing the number of false negatives.
1272   */
1273  void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1274  {
1275  	pr_debug("%s(0x%px)\n", __func__, ptr);
1276  
1277  	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1278  		add_scan_area((unsigned long)ptr, size, gfp);
1279  }
1280  EXPORT_SYMBOL(kmemleak_scan_area);
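
/*
 * Hypothetical usage sketch (editorial addition; struct big is a made-up
 * type): if only one field of a large object can hold pointers, restrict
 * scanning to that field:
 *
 *	struct big { void *link; char payload[4096]; };
 *	struct big *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	if (b)
 *		kmemleak_scan_area(&b->link, sizeof(b->link), GFP_KERNEL);
 */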
1281  
1282  /**
1283   * kmemleak_no_scan - do not scan an allocated object
1284   * @ptr:	pointer to beginning of the object
1285   *
1286   * This function notifies kmemleak not to scan the given memory block. Useful
1287   * in situations where it is known that the given object does not contain any
1288   * references to other objects. Kmemleak will not scan such objects, reducing
1289   * the number of false negatives.
1290   */
1291  void __ref kmemleak_no_scan(const void *ptr)
1292  {
1293  	pr_debug("%s(0x%px)\n", __func__, ptr);
1294  
1295  	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1296  		object_no_scan((unsigned long)ptr);
1297  }
1298  EXPORT_SYMBOL(kmemleak_no_scan);
1299  
1300  /**
1301   * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1302   *			 address argument
1303   * @phys:	physical address of the object
1304   * @size:	size of the object
1305   * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1306   */
1307  void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
1308  {
1309  	pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);
1310  
1311  	if (kmemleak_enabled)
1312  		/*
1313  		 * Create object with OBJECT_PHYS flag and
1314  		 * assume min_count 0.
1315  		 */
1316  		create_object_phys((unsigned long)phys, size, 0, gfp);
1317  }
1318  EXPORT_SYMBOL(kmemleak_alloc_phys);
1319  
1320  /**
1321   * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1322   *			     physical address argument
1323   * @phys:	physical address of the beginning or inside an object. This
1324   *		also represents the start of the range to be freed
1325   * @size:	size to be unregistered
1326   */
1327  void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1328  {
1329  	pr_debug("%s(0x%px)\n", __func__, &phys);
1330  
1331  	if (kmemleak_enabled)
1332  		delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
1333  }
1334  EXPORT_SYMBOL(kmemleak_free_part_phys);
1335  
1336  /**
1337   * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1338   *			  address argument
1339   * @phys:	physical address of the object
1340   */
1341  void __ref kmemleak_ignore_phys(phys_addr_t phys)
1342  {
1343  	pr_debug("%s(0x%px)\n", __func__, &phys);
1344  
1345  	if (kmemleak_enabled)
1346  		make_black_object((unsigned long)phys, OBJECT_PHYS);
1347  }
1348  EXPORT_SYMBOL(kmemleak_ignore_phys);
1349  
1350  /*
1351   * Update an object's checksum and return true if it was modified.
1352   */
1353  static bool update_checksum(struct kmemleak_object *object)
1354  {
1355  	u32 old_csum = object->checksum;
1356  
1357  	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
1358  		return false;
1359  
1360  	kasan_disable_current();
1361  	kcsan_disable_current();
1362  	if (object->flags & OBJECT_PERCPU) {
1363  		unsigned int cpu;
1364  
1365  		object->checksum = 0;
1366  		for_each_possible_cpu(cpu) {
1367  			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);
1368  
1369  			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
1370  		}
1371  	} else {
1372  		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
1373  	}
1374  	kasan_enable_current();
1375  	kcsan_enable_current();
1376  
1377  	return object->checksum != old_csum;
1378  }
1379  
1380  /*
1381   * Update an object's references. object->lock must be held by the caller.
1382   */
1383  static void update_refs(struct kmemleak_object *object)
1384  {
1385  	if (!color_white(object)) {
1386  		/* non-orphan, ignored or new */
1387  		return;
1388  	}
1389  
1390  	/*
1391  	 * Increase the object's reference count (number of pointers to the
1392  	 * memory block). If this count reaches the required minimum, the
1393  	 * object's color will become gray and it will be added to the
1394  	 * gray_list.
1395  	 */
1396  	object->count++;
1397  	if (color_gray(object)) {
1398  		/* put_object() called when removing from gray_list */
1399  		WARN_ON(!get_object(object));
1400  		list_add_tail(&object->gray_list, &gray_list);
1401  	}
1402  }
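
/*
 * Editorial example: for an object with min_count == 1 and count == 0
 * (white), the first pointer found by the scan raises count to 1,
 * color_gray() becomes true, a reference is taken and the object is
 * queued on gray_list so that its own memory gets scanned in turn.
 */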
1403  
1404  static void pointer_update_refs(struct kmemleak_object *scanned,
1405  			 unsigned long pointer, unsigned int objflags)
1406  {
1407  	struct kmemleak_object *object;
1408  	unsigned long untagged_ptr;
1409  	unsigned long excess_ref;
1410  
1411  	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1412  	if (objflags & OBJECT_PERCPU) {
1413  		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
1414  			return;
1415  	} else {
1416  		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1417  			return;
1418  	}
1419  
1420  	/*
1421  	 * No need for get_object() here since we hold kmemleak_lock.
1422  	 * object->use_count cannot be dropped to 0 while the object
1423  	 * is still present in object_tree_root and object_list
1424  	 * (with updates protected by kmemleak_lock).
1425  	 */
1426  	object = __lookup_object(pointer, 1, objflags);
1427  	if (!object)
1428  		return;
1429  	if (object == scanned)
1430  		/* self referenced, ignore */
1431  		return;
1432  
1433  	/*
1434  	 * Avoid the lockdep recursive warning on object->lock being
1435  	 * previously acquired in scan_object(). These locks are
1436  	 * enclosed by scan_mutex.
1437  	 */
1438  	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1439  	/* only pass surplus references (object already gray) */
1440  	if (color_gray(object)) {
1441  		excess_ref = object->excess_ref;
1442  		/* no need for update_refs() if object already gray */
1443  	} else {
1444  		excess_ref = 0;
1445  		update_refs(object);
1446  	}
1447  	raw_spin_unlock(&object->lock);
1448  
1449  	if (excess_ref) {
1450  		object = lookup_object(excess_ref, 0);
1451  		if (!object)
1452  			return;
1453  		if (object == scanned)
1454  			/* circular reference, ignore */
1455  			return;
1456  		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1457  		update_refs(object);
1458  		raw_spin_unlock(&object->lock);
1459  	}
1460  }
1461  
1462  /*
1463   * Memory scanning is a long process and it needs to be interruptible. This
1464   * function checks whether such an interrupt condition has occurred.
1465   */
1466  static int scan_should_stop(void)
1467  {
1468  	if (!kmemleak_enabled)
1469  		return 1;
1470  
1471  	/*
1472  	 * This function may be called from either process or kthread context,
1473  	 * hence the need to check for both stop conditions.
1474  	 */
1475  	if (current->mm)
1476  		return signal_pending(current);
1477  	else
1478  		return kthread_should_stop();
1479  
1480  	return 0;
1481  }
1482  
1483  /*
1484   * Scan a memory block (exclusive range) for valid pointers and add the
1485   * objects they reference to the gray list.
1486   */
1487  static void scan_block(void *_start, void *_end,
1488  		       struct kmemleak_object *scanned)
1489  {
1490  	unsigned long *ptr;
1491  	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1492  	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1493  	unsigned long flags;
1494  
1495  	raw_spin_lock_irqsave(&kmemleak_lock, flags);
1496  	for (ptr = start; ptr < end; ptr++) {
1497  		unsigned long pointer;
1498  
1499  		if (scan_should_stop())
1500  			break;
1501  
1502  		kasan_disable_current();
1503  		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1504  		kasan_enable_current();
1505  
1506  		pointer_update_refs(scanned, pointer, 0);
1507  		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
1508  	}
1509  	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1510  }
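
/*
 * A worked example of the bounds above, assuming a 64-bit kernel
 * (BYTES_PER_POINTER == 8): with _start == 0x1000 and _end == 0x1013,
 * start is 0x1000 and end becomes 0x100c, so full words are read at
 * 0x1000 and 0x1008 only; a read at 0x1010 is skipped because fewer
 * than eight bytes remain before _end.
 */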
1511  
1512  /*
1513   * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1514   */
1515  #ifdef CONFIG_SMP
1516  static void scan_large_block(void *start, void *end)
1517  {
1518  	void *next;
1519  
1520  	while (start < end) {
1521  		next = min(start + MAX_SCAN_SIZE, end);
1522  		scan_block(start, next, NULL);
1523  		start = next;
1524  		cond_resched();
1525  	}
1526  }
1527  #endif
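
/*
 * For example, assuming MAX_SCAN_SIZE is 4096 bytes, a 1 MiB per-cpu
 * section is scanned as 256 separate scan_block() calls with a
 * cond_resched() in between, bounding the time spent with
 * kmemleak_lock held and interrupts disabled.
 */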
1528  
1529  /*
1530   * Scan a memory block corresponding to a kmemleak_object. The caller must
1531   * ensure that object->use_count >= 1.
1532   */
1533  static void scan_object(struct kmemleak_object *object)
1534  {
1535  	struct kmemleak_scan_area *area;
1536  	unsigned long flags;
1537  
1538  	/*
1539  	 * Once the object->lock is acquired, the corresponding memory block
1540  	 * cannot be freed (the same lock is acquired in delete_object).
1541  	 */
1542  	raw_spin_lock_irqsave(&object->lock, flags);
1543  	if (object->flags & OBJECT_NO_SCAN)
1544  		goto out;
1545  	if (!(object->flags & OBJECT_ALLOCATED))
1546  		/* already freed object */
1547  		goto out;
1548  
1549  	if (object->flags & OBJECT_PERCPU) {
1550  		unsigned int cpu;
1551  
1552  		for_each_possible_cpu(cpu) {
1553  			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
1554  			void *end = start + object->size;
1555  
1556  			scan_block(start, end, object);
1557  
1558  			raw_spin_unlock_irqrestore(&object->lock, flags);
1559  			cond_resched();
1560  			raw_spin_lock_irqsave(&object->lock, flags);
1561  			if (!(object->flags & OBJECT_ALLOCATED))
1562  				break;
1563  		}
1564  	} else if (hlist_empty(&object->area_list) ||
1565  	    object->flags & OBJECT_FULL_SCAN) {
1566  		void *start = object->flags & OBJECT_PHYS ?
1567  				__va((phys_addr_t)object->pointer) :
1568  				(void *)object->pointer;
1569  		void *end = start + object->size;
1570  		void *next;
1571  
1572  		do {
1573  			next = min(start + MAX_SCAN_SIZE, end);
1574  			scan_block(start, next, object);
1575  
1576  			start = next;
1577  			if (start >= end)
1578  				break;
1579  
1580  			raw_spin_unlock_irqrestore(&object->lock, flags);
1581  			cond_resched();
1582  			raw_spin_lock_irqsave(&object->lock, flags);
1583  		} while (object->flags & OBJECT_ALLOCATED);
1584  	} else {
1585  		hlist_for_each_entry(area, &object->area_list, node)
1586  			scan_block((void *)area->start,
1587  				   (void *)(area->start + area->size),
1588  				   object);
1589  	}
1590  out:
1591  	raw_spin_unlock_irqrestore(&object->lock, flags);
1592  }
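
/*
 * Note the pattern above: object->lock is dropped around cond_resched()
 * and OBJECT_ALLOCATED is re-checked afterwards, because the memory
 * block may have been freed while the lock was not held. Only the
 * metadata is pinned (use_count >= 1), not the block itself.
 */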
1593  
1594  /*
1595   * Scan the objects already referenced (gray objects). More objects become
1596   * referenced during the scan; if there are no memory leaks, all objects are scanned.
1597   */
1598  static void scan_gray_list(void)
1599  {
1600  	struct kmemleak_object *object, *tmp;
1601  
1602  	/*
1603  	 * The list traversal is safe for both tail additions and removals
1604  	 * from inside the loop. The kmemleak objects cannot be freed from
1605  	 * outside the loop because their use_count was incremented.
1606  	 */
1607  	object = list_entry(gray_list.next, typeof(*object), gray_list);
1608  	while (&object->gray_list != &gray_list) {
1609  		cond_resched();
1610  
1611  		/* may add new objects to the list */
1612  		if (!scan_should_stop())
1613  			scan_object(object);
1614  
1615  		tmp = list_entry(object->gray_list.next, typeof(*object),
1616  				 gray_list);
1617  
1618  		/* remove the object from the list and release it */
1619  		list_del(&object->gray_list);
1620  		put_object(object);
1621  
1622  		object = tmp;
1623  	}
1624  	WARN_ON(!list_empty(&gray_list));
1625  }
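
/*
 * This is effectively a worklist algorithm: scanning a gray object may
 * append newly referenced objects to the tail of gray_list, and the
 * loop above keeps draining the list until no gray object is left.
 */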
1626  
1627  /*
1628   * Conditionally call cond_resched() in an object iteration loop while making
1629   * sure that the given object won't go away while the RCU read lock is dropped,
1630   * by performing a get_object() if necessary.
1631   */
1632  static void kmemleak_cond_resched(struct kmemleak_object *object)
1633  {
1634  	if (!get_object(object))
1635  		return;	/* Try next object */
1636  
1637  	raw_spin_lock_irq(&kmemleak_lock);
1638  	if (object->del_state & DELSTATE_REMOVED)
1639  		goto unlock_put;	/* Object removed */
1640  	object->del_state |= DELSTATE_NO_DELETE;
1641  	raw_spin_unlock_irq(&kmemleak_lock);
1642  
1643  	rcu_read_unlock();
1644  	cond_resched();
1645  	rcu_read_lock();
1646  
1647  	raw_spin_lock_irq(&kmemleak_lock);
1648  	if (object->del_state & DELSTATE_REMOVED)
1649  		list_del_rcu(&object->object_list);
1650  	object->del_state &= ~DELSTATE_NO_DELETE;
1651  unlock_put:
1652  	raw_spin_unlock_irq(&kmemleak_lock);
1653  	put_object(object);
1654  }
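
/*
 * The del_state handshake above allows object deletion to proceed while
 * the RCU read lock is dropped: with DELSTATE_NO_DELETE set, a
 * concurrent deletion only marks the object DELSTATE_REMOVED, and the
 * deferred list_del_rcu() is completed here after the lock is
 * re-acquired.
 */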
1655  
1656  /*
1657   * Scan data sections and all the referenced memory blocks allocated via the
1658   * kernel's standard allocators. This function must be called with the
1659   * scan_mutex held.
1660   */
1661  static void kmemleak_scan(void)
1662  {
1663  	struct kmemleak_object *object;
1664  	struct zone *zone;
1665  	int __maybe_unused i;
1666  	int new_leaks = 0;
1667  
1668  	jiffies_last_scan = jiffies;
1669  
1670  	/* prepare the kmemleak_object structures */
1671  	rcu_read_lock();
1672  	list_for_each_entry_rcu(object, &object_list, object_list) {
1673  		raw_spin_lock_irq(&object->lock);
1674  #ifdef DEBUG
1675  		/*
1676  		 * With a few exceptions there should be a maximum of
1677  		 * 1 reference to any object at this point.
1678  		 */
1679  		if (atomic_read(&object->use_count) > 1) {
1680  			pr_debug("object->use_count = %d\n",
1681  				 atomic_read(&object->use_count));
1682  			dump_object_info(object);
1683  		}
1684  #endif
1685  
1686  		/* ignore objects outside lowmem (paint them black) */
1687  		if ((object->flags & OBJECT_PHYS) &&
1688  		   !(object->flags & OBJECT_NO_SCAN)) {
1689  			unsigned long phys = object->pointer;
1690  
1691  			if (PHYS_PFN(phys) < min_low_pfn ||
1692  			    PHYS_PFN(phys + object->size) > max_low_pfn)
1693  				__paint_it(object, KMEMLEAK_BLACK);
1694  		}
1695  
1696  		/* reset the reference count (whiten the object) */
1697  		object->count = 0;
1698  		if (color_gray(object) && get_object(object))
1699  			list_add_tail(&object->gray_list, &gray_list);
1700  
1701  		raw_spin_unlock_irq(&object->lock);
1702  
1703  		if (need_resched())
1704  			kmemleak_cond_resched(object);
1705  	}
1706  	rcu_read_unlock();
1707  
1708  #ifdef CONFIG_SMP
1709  	/* per-cpu sections scanning */
1710  	for_each_possible_cpu(i)
1711  		scan_large_block(__per_cpu_start + per_cpu_offset(i),
1712  				 __per_cpu_end + per_cpu_offset(i));
1713  #endif
1714  
1715  	/*
1716  	 * Struct page scanning for each node.
1717  	 */
1718  	get_online_mems();
1719  	for_each_populated_zone(zone) {
1720  		unsigned long start_pfn = zone->zone_start_pfn;
1721  		unsigned long end_pfn = zone_end_pfn(zone);
1722  		unsigned long pfn;
1723  
1724  		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1725  			struct page *page = pfn_to_online_page(pfn);
1726  
1727  			if (!(pfn & 63))
1728  				cond_resched();
1729  
1730  			if (!page)
1731  				continue;
1732  
1733  			/* only scan pages belonging to this zone */
1734  			if (page_zone(page) != zone)
1735  				continue;
1736  			/* only scan if page is in use */
1737  			if (page_count(page) == 0)
1738  				continue;
1739  			scan_block(page, page + 1, NULL);
1740  		}
1741  	}
1742  	put_online_mems();
1743  
1744  	/*
1745  	 * Scanning the task stacks (may introduce false negatives).
1746  	 */
1747  	if (kmemleak_stack_scan) {
1748  		struct task_struct *p, *g;
1749  
1750  		rcu_read_lock();
1751  		for_each_process_thread(g, p) {
1752  			void *stack = try_get_task_stack(p);
1753  			if (stack) {
1754  				scan_block(stack, stack + THREAD_SIZE, NULL);
1755  				put_task_stack(p);
1756  			}
1757  		}
1758  		rcu_read_unlock();
1759  	}
1760  
1761  	/*
1762  	 * Scan the objects already referenced from the sections scanned
1763  	 * above.
1764  	 */
1765  	scan_gray_list();
1766  
1767  	/*
1768  	 * Check for new or unreferenced objects modified since the previous
1769  	 * scan and color them gray until the next scan.
1770  	 */
1771  	rcu_read_lock();
1772  	list_for_each_entry_rcu(object, &object_list, object_list) {
1773  		if (need_resched())
1774  			kmemleak_cond_resched(object);
1775  
1776  		/*
1777  		 * This is racy but we can save the overhead of lock/unlock
1778  		 * calls. The missed objects, if any, should be caught in
1779  		 * the next scan.
1780  		 */
1781  		if (!color_white(object))
1782  			continue;
1783  		raw_spin_lock_irq(&object->lock);
1784  		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1785  		    && update_checksum(object) && get_object(object)) {
1786  			/* color it gray temporarily */
1787  			object->count = object->min_count;
1788  			list_add_tail(&object->gray_list, &gray_list);
1789  		}
1790  		raw_spin_unlock_irq(&object->lock);
1791  	}
1792  	rcu_read_unlock();
1793  
1794  	/*
1795  	 * Re-scan the gray list for modified unreferenced objects.
1796  	 */
1797  	scan_gray_list();
1798  
1799  	/*
1800   * If scanning was stopped, do not report any new unreferenced objects.
1801  	 */
1802  	if (scan_should_stop())
1803  		return;
1804  
1805  	/*
1806  	 * Scanning result reporting.
1807  	 */
1808  	rcu_read_lock();
1809  	list_for_each_entry_rcu(object, &object_list, object_list) {
1810  		if (need_resched())
1811  			kmemleak_cond_resched(object);
1812  
1813  		/*
1814  		 * This is racy but we can save the overhead of lock/unlock
1815  		 * calls. The missed objects, if any, should be caught in
1816  		 * the next scan.
1817  		 */
1818  		if (!color_white(object))
1819  			continue;
1820  		raw_spin_lock_irq(&object->lock);
1821  		if (unreferenced_object(object) &&
1822  		    !(object->flags & OBJECT_REPORTED)) {
1823  			object->flags |= OBJECT_REPORTED;
1824  
1825  			if (kmemleak_verbose)
1826  				print_unreferenced(NULL, object);
1827  
1828  			new_leaks++;
1829  		}
1830  		raw_spin_unlock_irq(&object->lock);
1831  	}
1832  	rcu_read_unlock();
1833  
1834  	if (new_leaks) {
1835  		kmemleak_found_leaks = true;
1836  
1837  		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1838  			new_leaks);
1839  	}
1840  
1841  }
1842  
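/*
 * To summarise the function above, a full scan proceeds in phases:
 * (1) whiten all objects and queue the already-gray ones; (2) scan the
 * root areas (per-cpu sections, struct pages and, optionally, task
 * stacks); (3) drain the gray_list; (4) re-queue white objects whose
 * checksum changed and drain the gray_list again; (5) report any
 * remaining white objects as new suspected leaks.
 */
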
1843  /*
1844   * Thread function performing automatic memory scanning. Unreferenced objects
1845   * at the end of a memory scan are reported, but each object only once.
1846   */
1847  static int kmemleak_scan_thread(void *arg)
1848  {
1849  	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1850  
1851  	pr_info("Automatic memory scanning thread started\n");
1852  	set_user_nice(current, 10);
1853  
1854  	/*
1855  	 * Wait before the first scan to allow the system to fully initialize.
1856  	 */
1857  	if (first_run) {
1858  		signed long timeout = secs_to_jiffies(SECS_FIRST_SCAN);
1859  		first_run = 0;
1860  		while (timeout && !kthread_should_stop())
1861  			timeout = schedule_timeout_interruptible(timeout);
1862  	}
1863  
1864  	while (!kthread_should_stop()) {
1865  		signed long timeout = READ_ONCE(jiffies_scan_wait);
1866  
1867  		mutex_lock(&scan_mutex);
1868  		kmemleak_scan();
1869  		mutex_unlock(&scan_mutex);
1870  
1871  		/* wait before the next scan */
1872  		while (timeout && !kthread_should_stop())
1873  			timeout = schedule_timeout_interruptible(timeout);
1874  	}
1875  
1876  	pr_info("Automatic memory scanning thread ended\n");
1877  
1878  	return 0;
1879  }
1880  
1881  /*
1882   * Start the automatic memory scanning thread. This function must be called
1883   * with the scan_mutex held.
1884   */
1885  static void start_scan_thread(void)
1886  {
1887  	if (scan_thread)
1888  		return;
1889  	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1890  	if (IS_ERR(scan_thread)) {
1891  		pr_warn("Failed to create the scan thread\n");
1892  		scan_thread = NULL;
1893  	}
1894  }
1895  
1896  /*
1897   * Stop the automatic memory scanning thread.
1898   */
1899  static void stop_scan_thread(void)
1900  {
1901  	if (scan_thread) {
1902  		kthread_stop(scan_thread);
1903  		scan_thread = NULL;
1904  	}
1905  }
1906  
1907  /*
1908   * Iterate over the object_list and return the first valid object at or after
1909   * the required position with its use_count incremented.
1911   */
1912  static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1913  {
1914  	struct kmemleak_object *object;
1915  	loff_t n = *pos;
1916  	int err;
1917  
1918  	err = mutex_lock_interruptible(&scan_mutex);
1919  	if (err < 0)
1920  		return ERR_PTR(err);
1921  
1922  	rcu_read_lock();
1923  	list_for_each_entry_rcu(object, &object_list, object_list) {
1924  		if (n-- > 0)
1925  			continue;
1926  		if (get_object(object))
1927  			goto out;
1928  	}
1929  	object = NULL;
1930  out:
1931  	return object;
1932  }
1933  
1934  /*
1935   * Return the next object in the object_list. The function decrements the
1936   * use_count of the previous object and increases that of the next one.
1937   */
1938  static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1939  {
1940  	struct kmemleak_object *prev_obj = v;
1941  	struct kmemleak_object *next_obj = NULL;
1942  	struct kmemleak_object *obj = prev_obj;
1943  
1944  	++(*pos);
1945  
1946  	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1947  		if (get_object(obj)) {
1948  			next_obj = obj;
1949  			break;
1950  		}
1951  	}
1952  
1953  	put_object(prev_obj);
1954  	return next_obj;
1955  }
1956  
1957  /*
1958   * Decrement the use_count of the last object required, if any.
1959   */
1960  static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1961  {
1962  	if (!IS_ERR(v)) {
1963  		/*
1964  		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1965  		 * waiting was interrupted, so only release it if !IS_ERR.
1966  		 */
1967  		rcu_read_unlock();
1968  		mutex_unlock(&scan_mutex);
1969  		if (v)
1970  			put_object(v);
1971  	}
1972  }
1973  
1974  /*
1975   * Print the information for an unreferenced object to the seq file.
1976   */
1977  static int kmemleak_seq_show(struct seq_file *seq, void *v)
1978  {
1979  	struct kmemleak_object *object = v;
1980  	unsigned long flags;
1981  
1982  	raw_spin_lock_irqsave(&object->lock, flags);
1983  	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1984  		print_unreferenced(seq, object);
1985  	raw_spin_unlock_irqrestore(&object->lock, flags);
1986  	return 0;
1987  }
1988  
1989  static const struct seq_operations kmemleak_seq_ops = {
1990  	.start = kmemleak_seq_start,
1991  	.next  = kmemleak_seq_next,
1992  	.stop  = kmemleak_seq_stop,
1993  	.show  = kmemleak_seq_show,
1994  };
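
/*
 * The iterators above keep the current object's use_count elevated
 * between ->start/->next and ->stop, so an entry being printed cannot
 * be freed even if the corresponding memory block is released while
 * the file is being read.
 */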
1995  
1996  static int kmemleak_open(struct inode *inode, struct file *file)
1997  {
1998  	return seq_open(file, &kmemleak_seq_ops);
1999  }
2000  
2001  static int dump_str_object_info(const char *str)
2002  {
2003  	unsigned long flags;
2004  	struct kmemleak_object *object;
2005  	unsigned long addr;
2006  
2007  	if (kstrtoul(str, 0, &addr))
2008  		return -EINVAL;
2009  	object = find_and_get_object(addr, 0);
2010  	if (!object) {
2011  		pr_info("Unknown object at 0x%08lx\n", addr);
2012  		return -EINVAL;
2013  	}
2014  
2015  	raw_spin_lock_irqsave(&object->lock, flags);
2016  	dump_object_info(object);
2017  	raw_spin_unlock_irqrestore(&object->lock, flags);
2018  
2019  	put_object(object);
2020  	return 0;
2021  }
2022  
2023  /*
2024   * We use grey instead of black to ensure we can do future scans on the same
2025   * objects. If we did not do future scans, these black objects could
2026   * potentially contain references to newly allocated objects and we would
2027   * end up with false positives.
2028   */
2029  static void kmemleak_clear(void)
2030  {
2031  	struct kmemleak_object *object;
2032  
2033  	rcu_read_lock();
2034  	list_for_each_entry_rcu(object, &object_list, object_list) {
2035  		raw_spin_lock_irq(&object->lock);
2036  		if ((object->flags & OBJECT_REPORTED) &&
2037  		    unreferenced_object(object))
2038  			__paint_it(object, KMEMLEAK_GREY);
2039  		raw_spin_unlock_irq(&object->lock);
2040  	}
2041  	rcu_read_unlock();
2042  
2043  	kmemleak_found_leaks = false;
2044  }
2045  
2046  static void __kmemleak_do_cleanup(void);
2047  
2048  /*
2049   * File write operation to configure kmemleak at run-time. The following
2050   * commands can be written to the /sys/kernel/debug/kmemleak file:
2051   *   off	- disable kmemleak (irreversible)
2052   *   stack=on	- enable the task stacks scanning
2053   *   stack=off	- disable the task stacks scanning
2054   *   scan=on	- start the automatic memory scanning thread
2055   *   scan=off	- stop the automatic memory scanning thread
2056   *   scan=...	- set the automatic memory scanning period in seconds (0 to
2057   *		  disable it)
2058   *   scan	- trigger a memory scan
2059   *   clear	- mark all current reported unreferenced kmemleak objects as
2060   *		  grey to ignore printing them, or free all kmemleak objects
2061   *		  if kmemleak has been disabled.
2062   *   dump=...	- dump information about the object found at the given address
2063   */
2064  static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
2065  			      size_t size, loff_t *ppos)
2066  {
2067  	char buf[64];
2068  	int buf_size;
2069  	int ret;
2070  
2071  	buf_size = min(size, (sizeof(buf) - 1));
2072  	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
2073  		return -EFAULT;
2074  	buf[buf_size] = 0;
2075  
2076  	ret = mutex_lock_interruptible(&scan_mutex);
2077  	if (ret < 0)
2078  		return ret;
2079  
2080  	if (strncmp(buf, "clear", 5) == 0) {
2081  		if (kmemleak_enabled)
2082  			kmemleak_clear();
2083  		else
2084  			__kmemleak_do_cleanup();
2085  		goto out;
2086  	}
2087  
2088  	if (!kmemleak_enabled) {
2089  		ret = -EPERM;
2090  		goto out;
2091  	}
2092  
2093  	if (strncmp(buf, "off", 3) == 0)
2094  		kmemleak_disable();
2095  	else if (strncmp(buf, "stack=on", 8) == 0)
2096  		kmemleak_stack_scan = 1;
2097  	else if (strncmp(buf, "stack=off", 9) == 0)
2098  		kmemleak_stack_scan = 0;
2099  	else if (strncmp(buf, "scan=on", 7) == 0)
2100  		start_scan_thread();
2101  	else if (strncmp(buf, "scan=off", 8) == 0)
2102  		stop_scan_thread();
2103  	else if (strncmp(buf, "scan=", 5) == 0) {
2104  		unsigned secs;
2105  		unsigned long msecs;
2106  
2107  		ret = kstrtouint(buf + 5, 0, &secs);
2108  		if (ret < 0)
2109  			goto out;
2110  
2111  		msecs = secs * MSEC_PER_SEC;
2112  		if (msecs > UINT_MAX)
2113  			msecs = UINT_MAX;
2114  
2115  		stop_scan_thread();
2116  		if (msecs) {
2117  			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
2118  			start_scan_thread();
2119  		}
2120  	} else if (strncmp(buf, "scan", 4) == 0)
2121  		kmemleak_scan();
2122  	else if (strncmp(buf, "dump=", 5) == 0)
2123  		ret = dump_str_object_info(buf + 5);
2124  	else
2125  		ret = -EINVAL;
2126  
2127  out:
2128  	mutex_unlock(&scan_mutex);
2129  	if (ret < 0)
2130  		return ret;
2131  
2132  	/* ignore the rest of the buffer, only one command at a time */
2133  	*ppos += size;
2134  	return size;
2135  }
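
/*
 * Typical usage from user space (illustrative):
 *
 *   # mount -t debugfs none /sys/kernel/debug    (if not already mounted)
 *   # echo scan > /sys/kernel/debug/kmemleak     (trigger a scan now)
 *   # cat /sys/kernel/debug/kmemleak             (list suspected leaks)
 *   # echo scan=600 > /sys/kernel/debug/kmemleak (rescan every 600 sec)
 *   # echo clear > /sys/kernel/debug/kmemleak    (drop current reports)
 */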
2136  
2137  static const struct file_operations kmemleak_fops = {
2138  	.owner		= THIS_MODULE,
2139  	.open		= kmemleak_open,
2140  	.read		= seq_read,
2141  	.write		= kmemleak_write,
2142  	.llseek		= seq_lseek,
2143  	.release	= seq_release,
2144  };
2145  
2146  static void __kmemleak_do_cleanup(void)
2147  {
2148  	struct kmemleak_object *object, *tmp;
2149  
2150  	/*
2151  	 * Kmemleak has already been disabled, no need for RCU list traversal
2152  	 * or kmemleak_lock held.
2153  	 */
2154  	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2155  		__remove_object(object);
2156  		__delete_object(object);
2157  	}
2158  }
2159  
2160  /*
2161   * Stop the memory scanning thread and free the kmemleak internal objects if
2162   * no memory leaks were found (otherwise, kmemleak may still hold useful
2163   * information on the reported memory leaks).
2164   */
2165  static void kmemleak_do_cleanup(struct work_struct *work)
2166  {
2167  	stop_scan_thread();
2168  
2169  	mutex_lock(&scan_mutex);
2170  	/*
2171  	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
2172  	 * longer track object freeing. Ordering of the scan thread stopping and
2173  	 * the memory accesses below is guaranteed by the kthread_stop()
2174  	 * function.
2175  	 */
2176  	kmemleak_free_enabled = 0;
2177  	mutex_unlock(&scan_mutex);
2178  
2179  	if (!kmemleak_found_leaks)
2180  		__kmemleak_do_cleanup();
2181  	else
2182  		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2183  }
2184  
2185  static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2186  
2187  /*
2188   * Disable kmemleak. No memory allocation/freeing will be traced once this
2189   * function is called. Disabling kmemleak is an irreversible operation.
2190   */
2191  static void kmemleak_disable(void)
2192  {
2193  	/* atomically check whether it was already invoked */
2194  	if (cmpxchg(&kmemleak_error, 0, 1))
2195  		return;
2196  
2197  	/* stop any memory operation tracing */
2198  	kmemleak_enabled = 0;
2199  
2200  	/* check whether it is too early for a kernel thread */
2201  	if (kmemleak_late_initialized)
2202  		schedule_work(&cleanup_work);
2203  	else
2204  		kmemleak_free_enabled = 0;
2205  
2206  	pr_info("Kernel memory leak detector disabled\n");
2207  }
2208  
2209  /*
2210   * Allow boot-time kmemleak enabling/disabling (enabled by default).
2211   */
2212  static int __init kmemleak_boot_config(char *str)
2213  {
2214  	if (!str)
2215  		return -EINVAL;
2216  	if (strcmp(str, "off") == 0)
2217  		kmemleak_disable();
2218  	else if (strcmp(str, "on") == 0) {
2219  		kmemleak_skip_disable = 1;
2220  		stack_depot_request_early_init();
2221  	}
2222  	else
2223  		return -EINVAL;
2224  	return 0;
2225  }
2226  early_param("kmemleak", kmemleak_boot_config);
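
/*
 * For example, booting with "kmemleak=off" on the kernel command line
 * disables the detector before any allocation tracing starts, while
 * "kmemleak=on" overrides CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF and
 * requests early stack depot initialization.
 */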
2227  
2228  /*
2229   * Kmemleak initialization.
2230   */
2231  void __init kmemleak_init(void)
2232  {
2233  #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2234  	if (!kmemleak_skip_disable) {
2235  		kmemleak_disable();
2236  		return;
2237  	}
2238  #endif
2239  
2240  	if (kmemleak_error)
2241  		return;
2242  
2243  	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2244  	jiffies_scan_wait = secs_to_jiffies(SECS_SCAN_WAIT);
2245  
2246  	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2247  	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2248  
2249  	/* register the data/bss sections */
2250  	create_object((unsigned long)_sdata, _edata - _sdata,
2251  		      KMEMLEAK_GREY, GFP_ATOMIC);
2252  	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2253  		      KMEMLEAK_GREY, GFP_ATOMIC);
2254  	/* only register .data..ro_after_init if not within .data */
2255  	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2256  		create_object((unsigned long)__start_ro_after_init,
2257  			      __end_ro_after_init - __start_ro_after_init,
2258  			      KMEMLEAK_GREY, GFP_ATOMIC);
2259  }
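
/*
 * The data/bss objects registered above are deliberately created
 * KMEMLEAK_GREY: as roots they are always scanned for references to
 * other objects, but they can never turn white and be reported as
 * leaks themselves.
 */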
2260  
2261  /*
2262   * Late initialization function.
2263   */
2264  static int __init kmemleak_late_init(void)
2265  {
2266  	kmemleak_late_initialized = 1;
2267  
2268  	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2269  
2270  	if (kmemleak_error) {
2271  		/*
2272  		 * Some error occurred and kmemleak was disabled. There is a
2273  		 * small chance that kmemleak_disable() was called immediately
2274  		 * after setting kmemleak_late_initialized and we may end up with
2275  		 * two clean-up threads, though they are serialized by scan_mutex.
2276  		 */
2277  		schedule_work(&cleanup_work);
2278  		return -ENOMEM;
2279  	}
2280  
2281  	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2282  		mutex_lock(&scan_mutex);
2283  		start_scan_thread();
2284  		mutex_unlock(&scan_mutex);
2285  	}
2286  
2287  	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2288  		mem_pool_free_count);
2289  
2290  	return 0;
2291  }
2292  late_initcall(kmemleak_late_init);
2293