// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
 *   del_state modifications and accesses to the object trees
 *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
 *   object_list is the main list holding the metadata (struct
 *   kmemleak_object) for the allocated memory blocks. The object trees are
 *   red black trees used to look-up metadata based on a pointer to the
 *   corresponding memory block. The kmemleak_object structures are added to
 *   the object_list and the object tree root in the create_object() function
 *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
 *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback.
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer.
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
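
/*
 * A minimal sketch of the reference-counting pattern described above, using
 * the helpers defined later in this file (variable names are illustrative):
 *
 *	rcu_read_lock();
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 *	object = __lookup_object(ptr, alias, objflags);
 *	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	// use_count already dropped to 0
 *		object = NULL;			// object is being RCU-freed
 *	rcu_read_unlock();
 *	if (object) {
 *		...				// safe to access *object
 *		put_object(object);		// may schedule the RCU free
 *	}
 */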

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned int del_state;		/* deletion state */
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	depot_stack_handle_t trace_handle;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS		(1 << 4)
/* flag set for per-CPU pointers */
#define OBJECT_PERCPU		(1 << 5)

/* set when __remove_object() called */
#define DELSTATE_REMOVED	(1 << 0)
/* set to temporarily prevent deletion from object_list */
#define DELSTATE_NO_DELETE	(1 << 1)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PERCPU flag) boundaries */
static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protecting the access to object_list and the object trees declared above */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled __read_mostly = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled __read_mostly = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* minimum and maximum address that may be valid per-CPU pointers */
static unsigned long min_percpu_addr = ULONG_MAX;
static unsigned long max_percpu_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting "kmemleak=on" on the command line sets this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	if (object->flags & OBJECT_PERCPU)
		ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	if (object->flags & OBJECT_PERCPU)
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes on cpu %d):\n",
				   len, raw_smp_processor_id());
	else
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
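
/*
 * A minimal worked example of the coloring above (values as used by the
 * kmemleak_alloc*() callbacks later in this file):
 *
 *	min_count == 1, count == 0   -> white: potential leak, may be reported
 *	min_count == 1, count >= 1   -> gray:  referenced, will be scanned
 *	min_count == 0               -> gray:  never reported (false positive)
 *	min_count == KMEMLEAK_BLACK  -> black: neither scanned nor reported
 */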

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

static const char *__object_type_str(struct kmemleak_object *object)
{
	if (object->flags & OBJECT_PHYS)
		return " (phys)";
	if (object->flags & OBJECT_PERCPU)
		return " (percpu)";
	return "";
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
	warn_or_seq_printf(seq, "unreferenced object%s 0x%08lx (size %zu):\n",
			   __object_type_str(object),
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
			   object->comm, object->pid, object->jiffies);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
		warn_or_seq_printf(seq, "    %pS\n", ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object%s 0x%08lx (size %zu):\n",
		  __object_type_str(object), object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	if (object->trace_handle)
		stack_depot_print(object->trace_handle);
}

static struct rb_root *object_tree(unsigned long objflags)
{
	if (objflags & OBJECT_PHYS)
		return &object_phys_tree_root;
	if (objflags & OBJECT_PERCPU)
		return &object_percpu_tree_root;
	return &object_tree_root;
}

/*
 * Look up the metadata (kmemleak_object) of a memory block in the object
 * search tree based on a pointer value. If alias is 0, only values pointing
 * to the beginning of the memory block are allowed. The kmemleak_lock must
 * be held when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       unsigned int objflags)
{
	struct rb_node *rb = object_tree(objflags)->rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			/*
			 * Defer printk() while the kmemleak_lock is held to
			 * avoid a deadlock.
			 */
			printk_deferred_enter();
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			printk_deferred_exit();
			break;
		}
	}
	return NULL;
}

/* Look up a kmemleak object allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, 0);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	bool warn = false;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc_noprof(object_cache,
						 gfp_nested_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		warn = true;
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (warn)
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, 0);
}

/*
 * Remove an object from its object tree and object_list. Must be called with
 * the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object_tree(object->flags));
	if (!(object->del_state & DELSTATE_NO_DELETE))
		list_del_rcu(&object->object_list);
	object->del_state |= DELSTATE_REMOVED;
}

static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
							int alias,
							unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __lookup_object(ptr, alias, objflags);
	if (object)
		__remove_object(object);

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both object
 * tree root and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t trace_handle;
	unsigned long entries[MAX_TRACE];
	unsigned int nr_entries;

	/*
	 * Use object_cache to determine whether kmemleak_init() has
	 * been invoked. stack_depot_early_init() is called before
	 * kmemleak_init() in mm_core_init().
	 */
	if (!object_cache)
		return 0;
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return trace_handle;
}

static struct kmemleak_object *__alloc_object(gfp_t gfp)
{
	struct kmemleak_object *object;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->excess_ref = 0;
	object->count = 0;			/* white color initially */
	object->checksum = 0;
	object->del_state = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strscpy(object->comm, "hardirq");
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strscpy(object->comm, "softirq");
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strscpy(object->comm, current->comm);
	}

	/* kernel backtrace */
	object->trace_handle = set_track_prepare();

	return object;
}

static int __link_object(struct kmemleak_object *object, unsigned long ptr,
			 size_t size, int min_count, unsigned int objflags)
{
	struct kmemleak_object *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object->flags = OBJECT_ALLOCATED | objflags;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->min_count = min_count;
	object->jiffies = jiffies;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr for objects storing a virtual
	 * address, and min_percpu_addr/max_percpu_addr for per-CPU objects.
	 */
	if (objflags & OBJECT_PERCPU) {
		min_percpu_addr = min(min_percpu_addr, untagged_ptr);
		max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
	} else if (!(objflags & OBJECT_PHYS)) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = &object_tree(objflags)->rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			/*
			 * Defer printk() while the kmemleak_lock is held to
			 * avoid a deadlock.
			 */
			printk_deferred_enter();
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			printk_deferred_exit();
			return -EEXIST;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, object_tree(objflags));
	list_add_tail_rcu(&object->object_list, &object_list);

	return 0;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object tree.
 */
static void __create_object(unsigned long ptr, size_t size,
				int min_count, gfp_t gfp, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int ret;

	object = __alloc_object(gfp);
	if (!object)
		return;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	ret = __link_object(object, ptr, size, min_count, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (ret)
		mem_pool_free(object);
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, 0);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
}

/* Create a kmemleak object corresponding to a per-CPU allocation. */
static void create_object_percpu(unsigned long ptr, size_t size,
				 int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, objflags);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size,
			       unsigned int objflags)
{
	struct kmemleak_object *object, *object_l, *object_r;
	unsigned long start, end, flags;

	object_l = __alloc_object(GFP_KERNEL);
	if (!object_l)
		return;

	object_r = __alloc_object(GFP_KERNEL);
	if (!object_r)
		goto out;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, 1, objflags);
	if (!object)
		goto unlock;

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if ((ptr > start) &&
	    !__link_object(object_l, start, ptr - start,
			   object->min_count, objflags))
		object_l = NULL;
	if ((ptr + size < end) &&
	    !__link_object(object_r, ptr + size, end - ptr - size,
			   object->min_count, objflags))
		object_r = NULL;

unlock:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (object) {
		__delete_object(object);
	} else {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
	}

out:
	if (object_l)
		mem_pool_free(object_l);
	if (object_r)
		mem_pool_free(object_r);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, objflags);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, 0);
}

/*
 * Mark the object as black-colored so that it is excluded from scanning and
 * reporting.
 */
static void make_black_object(unsigned long ptr, unsigned int objflags)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}

/*
 * Reset the checksum of an object. The immediate effect is that it will not
 * be reported as a leak during the next scan until its checksum is updated.
 */
static void reset_checksum(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not resetting the checksum of an unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->checksum = 0;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc_noprof(scan_area_cache,
					       gfp_nested_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
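
/*
 * A minimal usage sketch (illustrative only; my_pool_alloc()/my_pool_free()
 * are hypothetical allocators that kmemleak does not hook automatically).
 * Slab, vmalloc and percpu allocations are already registered through the
 * callbacks in this file, so no explicit call is needed for those:
 *
 *	obj = my_pool_alloc(size);
 *	kmemleak_alloc(obj, size, 1, GFP_KERNEL);	// min_count 1: report if unreferenced
 *	...
 *	kmemleak_free(obj);				// unregister before the real free
 *	my_pool_free(obj);
 */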

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);

	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
		create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
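
/*
 * An illustrative sketch of the two references counted above: the vmalloc
 * metadata itself points at the block, so one external pointer alone must
 * not be enough to make the object gray:
 *
 *	area->addr -----------> vmalloc'ed block	// reference 1 (vm_struct)
 *	caller's pointer -----> vmalloc'ed block	// reference 2 (the real user)
 *
 * With min_count == 2, losing the caller's pointer leaves only the vm_struct
 * reference and the block is still correctly reported as a leak.
 */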

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
		delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	depot_stack_handle_t trace_handle;
	unsigned long flags;

	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	trace_handle = set_track_prepare();
	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_handle = trace_handle;
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
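
/*
 * A minimal usage sketch (illustrative; "global_ctx" is a hypothetical object
 * whose only reference lives somewhere kmemleak cannot scan, e.g. a hardware
 * register, and which would otherwise be reported as a leak):
 *
 *	global_ctx = kmalloc(sizeof(*global_ctx), GFP_KERNEL);
 *	kmemleak_not_leak(global_ctx);	// gray: never reported, still scanned
 */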

/**
 * kmemleak_transient_leak - mark an allocated object as transient false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to not be
 * reported as a leak temporarily. This may happen, for example, if the object
 * is part of a singly linked list and the ->next reference to it is changed.
 */
void __ref kmemleak_transient_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		reset_checksum((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_transient_leak);
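
/*
 * An illustrative sketch (hypothetical list manipulation): after unlinking a
 * node that will be re-linked later, suppress a one-off false report:
 *
 *	prev->next = node->next;	// 'node' may now look unreferenced
 *	kmemleak_transient_leak(node);	// checksum reset: not reported on the next scan
 */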

/**
 * kmemleak_ignore_percpu - similar to kmemleak_ignore but taking a percpu
 *			    address argument
 * @ptr:	percpu address of the object
 */
void __ref kmemleak_ignore_percpu(const void __percpu *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
		make_black_object((unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_ignore_percpu);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
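
/*
 * A minimal usage sketch (illustrative; "struct foo" is hypothetical): only
 * the embedded pointer array can hold references, so restrict scanning to it
 * and avoid random payload bytes being misread as pointers:
 *
 *	struct foo {
 *		char data[512];		// payload, never holds pointers
 *		void *refs[4];		// the only real references
 *	};
 *
 *	f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	kmemleak_scan_area(&f->refs, sizeof(f->refs), GFP_KERNEL);
 */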

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);
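
/*
 * A minimal usage sketch (illustrative; a DMA data buffer is a typical case
 * where the block holds no kernel pointers and scanning it is pure overhead;
 * BUF_SIZE is hypothetical):
 *
 *	buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *	kmemleak_no_scan(buf);		// still leak-tracked, never scanned
 */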

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create object with OBJECT_PHYS flag and
		 * assume min_count 0.
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
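
/*
 * A minimal usage sketch (illustrative; assumes an early allocator that does
 * not already register its ranges with kmemleak). min_count is forced to 0
 * above, so a physically registered object is scanned but never reported:
 *
 *	phys = my_early_phys_alloc(size);	// hypothetical allocator
 *	kmemleak_alloc_phys(phys, size, GFP_KERNEL);
 */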

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		object->checksum = 0;
		for_each_possible_cpu(cpu) {
			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);

			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
		}
	} else {
		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	}
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

static void pointer_update_refs(struct kmemleak_object *scanned,
			 unsigned long pointer, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long untagged_ptr;
	unsigned long excess_ref;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
	if (objflags & OBJECT_PERCPU) {
		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
			return;
	} else {
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			return;
	}

	/*
	 * No need for get_object() here since we hold kmemleak_lock.
	 * object->use_count cannot be dropped to 0 while the object
	 * is still present in object_tree_root and object_list
	 * (with updates protected by kmemleak_lock).
	 */
	object = __lookup_object(pointer, 1, objflags);
	if (!object)
		return;
	if (object == scanned)
		/* self referenced, ignore */
		return;

	/*
	 * Avoid the lockdep recursive warning on object->lock being
	 * previously acquired in scan_object(). These locks are
	 * enclosed by scan_mutex.
	 */
	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
	/* only pass surplus references (object already gray) */
	if (color_gray(object)) {
		excess_ref = object->excess_ref;
		/* no need for update_refs() if object already gray */
	} else {
		excess_ref = 0;
		update_refs(object);
	}
	raw_spin_unlock(&object->lock);

	if (excess_ref) {
		object = lookup_object(excess_ref, 0);
		if (!object)
			return;
		if (object == scanned)
			/* circular reference, ignore */
			return;
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		update_refs(object);
		raw_spin_unlock(&object->lock);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}
1517 
1518 /*
1519  * Scan a memory block (exclusive range) for valid pointers and add those
1520  * found to the gray list.
1521  */
scan_block(void * _start,void * _end,struct kmemleak_object * scanned)1522 static void scan_block(void *_start, void *_end,
1523 		       struct kmemleak_object *scanned)
1524 {
1525 	unsigned long *ptr;
1526 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1527 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1528 	unsigned long flags;
1529 
1530 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
1531 	for (ptr = start; ptr < end; ptr++) {
1532 		unsigned long pointer;
1533 
1534 		if (scan_should_stop())
1535 			break;
1536 
1537 		kasan_disable_current();
1538 		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1539 		kasan_enable_current();
1540 
1541 		pointer_update_refs(scanned, pointer, 0);
1542 		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
1543 	}
1544 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1545 }
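
/*
 * Boundary handling in scan_block(), illustrated assuming BYTES_PER_POINTER
 * is sizeof(void *) (8 bytes on a 64-bit kernel), as defined earlier in
 * this file:
 *
 *	char buf[21];				(assume 8-byte aligned)
 *	scan_block(buf, buf + sizeof(buf), NULL);
 *
 * PTR_ALIGN() rounds the start up to a pointer boundary and the end is
 * pulled back by BYTES_PER_POINTER - 1 bytes, so only the full words at
 * offsets 0 and 8 are loaded; the partial tail (bytes 16-20) is never
 * dereferenced and no load can run past _end.
 */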
1546 
1547 /*
1548  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1549  */
1550 #ifdef CONFIG_SMP
1551 static void scan_large_block(void *start, void *end)
1552 {
1553 	void *next;
1554 
1555 	while (start < end) {
1556 		next = min(start + MAX_SCAN_SIZE, end);
1557 		scan_block(start, next, NULL);
1558 		start = next;
1559 		cond_resched();
1560 	}
1561 }
1562 #endif
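
/*
 * Worked example of the chunking above, assuming MAX_SCAN_SIZE (defined
 * earlier in this file) is 4096 bytes: a 10240-byte per-cpu section is
 * scanned as 4096 + 4096 + 2048 byte chunks, with cond_resched() between
 * chunks so a large region does not monopolize the CPU, and kmemleak_lock
 * (taken inside scan_block()) is dropped at every chunk boundary.
 */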
1563 
1564 /*
1565  * Scan a memory block corresponding to a kmemleak_object. The caller must
1566  * ensure that object->use_count >= 1.
1567  */
1568 static void scan_object(struct kmemleak_object *object)
1569 {
1570 	struct kmemleak_scan_area *area;
1571 	unsigned long flags;
1572 
1573 	/*
1574 	 * Once the object->lock is acquired, the corresponding memory block
1575 	 * cannot be freed (the same lock is acquired in delete_object).
1576 	 */
1577 	raw_spin_lock_irqsave(&object->lock, flags);
1578 	if (object->flags & OBJECT_NO_SCAN)
1579 		goto out;
1580 	if (!(object->flags & OBJECT_ALLOCATED))
1581 		/* already freed object */
1582 		goto out;
1583 
1584 	if (object->flags & OBJECT_PERCPU) {
1585 		unsigned int cpu;
1586 
1587 		for_each_possible_cpu(cpu) {
1588 			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
1589 			void *end = start + object->size;
1590 
1591 			scan_block(start, end, object);
1592 
1593 			raw_spin_unlock_irqrestore(&object->lock, flags);
1594 			cond_resched();
1595 			raw_spin_lock_irqsave(&object->lock, flags);
1596 			if (!(object->flags & OBJECT_ALLOCATED))
1597 				break;
1598 		}
1599 	} else if (hlist_empty(&object->area_list) ||
1600 	    object->flags & OBJECT_FULL_SCAN) {
1601 		void *start = object->flags & OBJECT_PHYS ?
1602 				__va((phys_addr_t)object->pointer) :
1603 				(void *)object->pointer;
1604 		void *end = start + object->size;
1605 		void *next;
1606 
1607 		do {
1608 			next = min(start + MAX_SCAN_SIZE, end);
1609 			scan_block(start, next, object);
1610 
1611 			start = next;
1612 			if (start >= end)
1613 				break;
1614 
1615 			raw_spin_unlock_irqrestore(&object->lock, flags);
1616 			cond_resched();
1617 			raw_spin_lock_irqsave(&object->lock, flags);
1618 		} while (object->flags & OBJECT_ALLOCATED);
1619 	} else {
1620 		hlist_for_each_entry(area, &object->area_list, node)
1621 			scan_block((void *)area->start,
1622 				   (void *)(area->start + area->size),
1623 				   object);
1624 	}
1625 out:
1626 	raw_spin_unlock_irqrestore(&object->lock, flags);
1627 }
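
/*
 * A hedged sketch of the race that holding object->lock across each chunk
 * prevents; delete_object(), earlier in this file, takes the same lock
 * before tearing the object down:
 *
 *	scanning CPU				freeing CPU
 *	------------				-----------
 *	raw_spin_lock_irqsave(&object->lock)
 *	scan_block(start, next, object)		kfree() -> kmemleak_free()
 *						-> delete_object() spins on
 *						   object->lock, so the block
 *						   stays valid while scanned
 *	raw_spin_unlock_irqrestore(...)		lock acquired, object freed
 *
 * This is why OBJECT_ALLOCATED is re-checked after every voluntary unlock
 * around cond_resched() above.
 */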
1628 
1629 /*
1630  * Scan the objects already referenced (gray objects). More objects become
1631  * referenced as the scan proceeds; if there are no memory leaks, all
1632  * objects are eventually scanned.
1632  */
1633 static void scan_gray_list(void)
1634 {
1635 	struct kmemleak_object *object, *tmp;
1636 
1637 	/*
1638 	 * The list traversal is safe for both tail additions and removals
1639 	 * from inside the loop. The kmemleak objects cannot be freed from
1640 	 * outside the loop because their use_count was incremented.
1641 	 */
1642 	object = list_entry(gray_list.next, typeof(*object), gray_list);
1643 	while (&object->gray_list != &gray_list) {
1644 		cond_resched();
1645 
1646 		/* may add new objects to the list */
1647 		if (!scan_should_stop())
1648 			scan_object(object);
1649 
1650 		tmp = list_entry(object->gray_list.next, typeof(*object),
1651 				 gray_list);
1652 
1653 		/* remove the object from the list and release it */
1654 		list_del(&object->gray_list);
1655 		put_object(object);
1656 
1657 		object = tmp;
1658 	}
1659 	WARN_ON(!list_empty(&gray_list));
1660 }
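
/*
 * The walk above is open-coded rather than written as
 * list_for_each_entry_safe() because 'tmp' is only sampled after
 * scan_object() returns, so entries appended to the tail during the scan
 * are visited in the same pass. The "safe" iterator caches the next entry
 * before the loop body runs:
 *
 *	list_for_each_entry_safe(object, tmp, &gray_list, gray_list)
 *		scan_object(object);	(entries added while scanning the
 *					 current tail would not be visited)
 */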
1661 
1662 /*
1663  * Conditionally call cond_resched() in an object iteration loop while making
1664  * sure that the given object won't go away while the RCU read lock is
1665  * temporarily dropped, by performing a get_object() if necessary.
1666  */
1667 static void kmemleak_cond_resched(struct kmemleak_object *object)
1668 {
1669 	if (!get_object(object))
1670 		return;	/* Try next object */
1671 
1672 	raw_spin_lock_irq(&kmemleak_lock);
1673 	if (object->del_state & DELSTATE_REMOVED)
1674 		goto unlock_put;	/* Object removed */
1675 	object->del_state |= DELSTATE_NO_DELETE;
1676 	raw_spin_unlock_irq(&kmemleak_lock);
1677 
1678 	rcu_read_unlock();
1679 	cond_resched();
1680 	rcu_read_lock();
1681 
1682 	raw_spin_lock_irq(&kmemleak_lock);
1683 	if (object->del_state & DELSTATE_REMOVED)
1684 		list_del_rcu(&object->object_list);
1685 	object->del_state &= ~DELSTATE_NO_DELETE;
1686 unlock_put:
1687 	raw_spin_unlock_irq(&kmemleak_lock);
1688 	put_object(object);
1689 }
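
/*
 * Sketch of the del_state handshake above with a concurrent deletion
 * (del_state updates are made under kmemleak_lock):
 *
 *	kmemleak_cond_resched()			deleting CPU
 *	-----------------------			------------
 *	set DELSTATE_NO_DELETE
 *	rcu_read_unlock(); cond_resched()	delete path sees
 *						DELSTATE_NO_DELETE, marks
 *						DELSTATE_REMOVED and leaves
 *						the object on object_list
 *	relock; DELSTATE_REMOVED is set, so
 *	finish the deferred list_del_rcu()
 *
 * Keeping the entry linked while sleeping is what keeps the caller's RCU
 * list iteration valid across the cond_resched().
 */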
1690 
1691 /*
1692  * Scan data sections and all the referenced memory blocks allocated via the
1693  * kernel's standard allocators. This function must be called with the
1694  * scan_mutex held.
1695  */
1696 static void kmemleak_scan(void)
1697 {
1698 	struct kmemleak_object *object;
1699 	struct zone *zone;
1700 	int __maybe_unused i;
1701 	int new_leaks = 0;
1702 
1703 	jiffies_last_scan = jiffies;
1704 
1705 	/* prepare the kmemleak_objects */
1706 	rcu_read_lock();
1707 	list_for_each_entry_rcu(object, &object_list, object_list) {
1708 		raw_spin_lock_irq(&object->lock);
1709 #ifdef DEBUG
1710 		/*
1711 		 * With a few exceptions there should be a maximum of
1712 		 * 1 reference to any object at this point.
1713 		 */
1714 		if (atomic_read(&object->use_count) > 1) {
1715 			pr_debug("object->use_count = %d\n",
1716 				 atomic_read(&object->use_count));
1717 			dump_object_info(object);
1718 		}
1719 #endif
1720 
1721 		/* ignore objects outside lowmem (paint them black) */
1722 		if ((object->flags & OBJECT_PHYS) &&
1723 		   !(object->flags & OBJECT_NO_SCAN)) {
1724 			unsigned long phys = object->pointer;
1725 
1726 			if (PHYS_PFN(phys) < min_low_pfn ||
1727 			    PHYS_PFN(phys + object->size) > max_low_pfn)
1728 				__paint_it(object, KMEMLEAK_BLACK);
1729 		}
1730 
1731 		/* reset the reference count (whiten the object) */
1732 		object->count = 0;
1733 		if (color_gray(object) && get_object(object))
1734 			list_add_tail(&object->gray_list, &gray_list);
1735 
1736 		raw_spin_unlock_irq(&object->lock);
1737 
1738 		if (need_resched())
1739 			kmemleak_cond_resched(object);
1740 	}
1741 	rcu_read_unlock();
1742 
1743 #ifdef CONFIG_SMP
1744 	/* per-cpu sections scanning */
1745 	for_each_possible_cpu(i)
1746 		scan_large_block(__per_cpu_start + per_cpu_offset(i),
1747 				 __per_cpu_end + per_cpu_offset(i));
1748 #endif
1749 
1750 	/*
1751 	 * Struct page scanning for each populated zone.
1752 	 */
1753 	get_online_mems();
1754 	for_each_populated_zone(zone) {
1755 		unsigned long start_pfn = zone->zone_start_pfn;
1756 		unsigned long end_pfn = zone_end_pfn(zone);
1757 		unsigned long pfn;
1758 
1759 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1760 			struct page *page = pfn_to_online_page(pfn);
1761 
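			/* take a breather every 64 pages to limit latency */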
1762 			if (!(pfn & 63))
1763 				cond_resched();
1764 
1765 			if (!page)
1766 				continue;
1767 
1768 			/* only scan pages belonging to this zone */
1769 			if (page_zone(page) != zone)
1770 				continue;
1771 			/* only scan if page is in use */
1772 			if (page_count(page) == 0)
1773 				continue;
1774 			scan_block(page, page + 1, NULL);
1775 		}
1776 	}
1777 	put_online_mems();
1778 
1779 	/*
1780 	 * Scanning the task stacks (may introduce false negatives).
1781 	 */
1782 	if (kmemleak_stack_scan) {
1783 		struct task_struct *p, *g;
1784 
1785 		rcu_read_lock();
1786 		for_each_process_thread(g, p) {
1787 			void *stack = try_get_task_stack(p);
1788 			if (stack) {
1789 				scan_block(stack, stack + THREAD_SIZE, NULL);
1790 				put_task_stack(p);
1791 			}
1792 		}
1793 		rcu_read_unlock();
1794 	}
1795 
1796 	/*
1797 	 * Scan the objects already referenced from the sections scanned
1798 	 * above.
1799 	 */
1800 	scan_gray_list();
1801 
1802 	/*
1803 	 * Check for new or unreferenced objects modified since the previous
1804 	 * scan and color them gray until the next scan.
1805 	 */
1806 	rcu_read_lock();
1807 	list_for_each_entry_rcu(object, &object_list, object_list) {
1808 		if (need_resched())
1809 			kmemleak_cond_resched(object);
1810 
1811 		/*
1812 		 * This is racy but we can save the overhead of lock/unlock
1813 		 * calls. The missed objects, if any, should be caught in
1814 		 * the next scan.
1815 		 */
1816 		if (!color_white(object))
1817 			continue;
1818 		raw_spin_lock_irq(&object->lock);
1819 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1820 		    && update_checksum(object) && get_object(object)) {
1821 			/* color it gray temporarily */
1822 			object->count = object->min_count;
1823 			list_add_tail(&object->gray_list, &gray_list);
1824 		}
1825 		raw_spin_unlock_irq(&object->lock);
1826 	}
1827 	rcu_read_unlock();
1828 
1829 	/*
1830 	 * Re-scan the gray list for modified unreferenced objects.
1831 	 */
1832 	scan_gray_list();
1833 
1834 	/*
1835 	 * If scanning was stopped, do not report any new unreferenced objects.
1836 	 */
1837 	if (scan_should_stop())
1838 		return;
1839 
1840 	/*
1841 	 * Scanning result reporting.
1842 	 */
1843 	rcu_read_lock();
1844 	list_for_each_entry_rcu(object, &object_list, object_list) {
1845 		if (need_resched())
1846 			kmemleak_cond_resched(object);
1847 
1848 		/*
1849 		 * This is racy but we can save the overhead of lock/unlock
1850 		 * calls. The missed objects, if any, should be caught in
1851 		 * the next scan.
1852 		 */
1853 		if (!color_white(object))
1854 			continue;
1855 		raw_spin_lock_irq(&object->lock);
1856 		if (unreferenced_object(object) &&
1857 		    !(object->flags & OBJECT_REPORTED)) {
1858 			object->flags |= OBJECT_REPORTED;
1859 
1860 			if (kmemleak_verbose)
1861 				print_unreferenced(NULL, object);
1862 
1863 			new_leaks++;
1864 		}
1865 		raw_spin_unlock_irq(&object->lock);
1866 	}
1867 	rcu_read_unlock();
1868 
1869 	if (new_leaks) {
1870 		kmemleak_found_leaks = true;
1871 
1872 		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1873 			new_leaks);
1874 	}
1876 }
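
/*
 * Why kmemleak_scan() runs scan_gray_list() twice, sketched: an object
 * still being modified while the first pass ran is deliberately not
 * reported yet.
 *
 *	pass 1:	object B ends up unreferenced		-> B stays white
 *	check:	update_checksum(B) shows B changed since the last scan,
 *		so B is grayed temporarily (count = min_count) and queued
 *	pass 2:	B is scanned, so anything B points to becomes referenced,
 *		and B itself, being gray, is not reported this cycle
 *
 * If B is still unreferenced and no longer changing, it becomes a leak
 * candidate again on the next scan.
 */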
1877 
1878 /*
1879  * Thread function performing automatic memory scanning. Unreferenced objects
1880  * remaining at the end of a memory scan are reported, but each one only once.
1881  */
1882 static int kmemleak_scan_thread(void *arg)
1883 {
1884 	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1885 
1886 	pr_info("Automatic memory scanning thread started\n");
1887 	set_user_nice(current, 10);
1888 
1889 	/*
1890 	 * Wait before the first scan to allow the system to fully initialize.
1891 	 */
1892 	if (first_run) {
1893 		signed long timeout = secs_to_jiffies(SECS_FIRST_SCAN);
1894 		first_run = 0;
1895 		while (timeout && !kthread_should_stop())
1896 			timeout = schedule_timeout_interruptible(timeout);
1897 	}
1898 
1899 	while (!kthread_should_stop()) {
1900 		signed long timeout = READ_ONCE(jiffies_scan_wait);
1901 
1902 		mutex_lock(&scan_mutex);
1903 		kmemleak_scan();
1904 		mutex_unlock(&scan_mutex);
1905 
1906 		/* wait before the next scan */
1907 		while (timeout && !kthread_should_stop())
1908 			timeout = schedule_timeout_interruptible(timeout);
1909 	}
1910 
1911 	pr_info("Automatic memory scanning thread ended\n");
1912 
1913 	return 0;
1914 }
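
/*
 * The wait loops above rely on schedule_timeout_interruptible() returning
 * the number of jiffies remaining when the sleep ends early:
 *
 *	signed long timeout = READ_ONCE(jiffies_scan_wait);
 *
 *	while (timeout && !kthread_should_stop())
 *		timeout = schedule_timeout_interruptible(timeout);
 *
 * A signal or spurious wakeup therefore resumes sleeping for the
 * remainder, while kthread_stop() ends the wait promptly.
 */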
1915 
1916 /*
1917  * Start the automatic memory scanning thread. This function must be called
1918  * with the scan_mutex held.
1919  */
1920 static void start_scan_thread(void)
1921 {
1922 	if (scan_thread)
1923 		return;
1924 	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1925 	if (IS_ERR(scan_thread)) {
1926 		pr_warn("Failed to create the scan thread\n");
1927 		scan_thread = NULL;
1928 	}
1929 }
1930 
1931 /*
1932  * Stop the automatic memory scanning thread.
1933  */
1934 static void stop_scan_thread(void)
1935 {
1936 	if (scan_thread) {
1937 		kthread_stop(scan_thread);
1938 		scan_thread = NULL;
1939 	}
1940 }
1941 
1942 /*
1943  * Iterate over the object_list and return the first valid object at or after
1944  * the required position with its use_count incremented.
1946  */
1947 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1948 {
1949 	struct kmemleak_object *object;
1950 	loff_t n = *pos;
1951 	int err;
1952 
1953 	err = mutex_lock_interruptible(&scan_mutex);
1954 	if (err < 0)
1955 		return ERR_PTR(err);
1956 
1957 	rcu_read_lock();
1958 	list_for_each_entry_rcu(object, &object_list, object_list) {
1959 		if (n-- > 0)
1960 			continue;
1961 		if (get_object(object))
1962 			goto out;
1963 	}
1964 	object = NULL;
1965 out:
1966 	return object;
1967 }
1968 
1969 /*
1970  * Return the next object in the object_list. The function decrements the
1971  * use_count of the previous object and increases that of the next one.
1972  */
1973 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1974 {
1975 	struct kmemleak_object *prev_obj = v;
1976 	struct kmemleak_object *next_obj = NULL;
1977 	struct kmemleak_object *obj = prev_obj;
1978 
1979 	++(*pos);
1980 
1981 	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1982 		if (get_object(obj)) {
1983 			next_obj = obj;
1984 			break;
1985 		}
1986 	}
1987 
1988 	put_object(prev_obj);
1989 	return next_obj;
1990 }
1991 
1992 /*
1993  * Decrement the use_count of the last object returned, if any.
1994  */
1995 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1996 {
1997 	if (!IS_ERR(v)) {
1998 		/*
1999 		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
2000 		 * waiting was interrupted, so only release it if !IS_ERR.
2001 		 */
2002 		rcu_read_unlock();
2003 		mutex_unlock(&scan_mutex);
2004 		if (v)
2005 			put_object(v);
2006 	}
2007 }
2008 
2009 /*
2010  * Print the information for an unreferenced object to the seq file.
2011  */
2012 static int kmemleak_seq_show(struct seq_file *seq, void *v)
2013 {
2014 	struct kmemleak_object *object = v;
2015 	unsigned long flags;
2016 
2017 	raw_spin_lock_irqsave(&object->lock, flags);
2018 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
2019 		print_unreferenced(seq, object);
2020 	raw_spin_unlock_irqrestore(&object->lock, flags);
2021 	return 0;
2022 }
2023 
2024 static const struct seq_operations kmemleak_seq_ops = {
2025 	.start = kmemleak_seq_start,
2026 	.next  = kmemleak_seq_next,
2027 	.stop  = kmemleak_seq_stop,
2028 	.show  = kmemleak_seq_show,
2029 };
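
/*
 * These seq_file hooks back the debugfs entry registered in
 * kmemleak_late_init() below, so the current leak report is read with
 * plain file I/O from a root shell (output shape illustrative; it is
 * produced by print_unreferenced() earlier in this file):
 *
 *	# cat /sys/kernel/debug/kmemleak
 *	unreferenced object 0x... (size 64):
 *	  comm "modprobe", pid 123, jiffies 4294953501
 *	  backtrace:
 *	    ...
 */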
2030 
2031 static int kmemleak_open(struct inode *inode, struct file *file)
2032 {
2033 	return seq_open(file, &kmemleak_seq_ops);
2034 }
2035 
2036 static bool __dump_str_object_info(unsigned long addr, unsigned int objflags)
2037 {
2038 	unsigned long flags;
2039 	struct kmemleak_object *object;
2040 
2041 	object = __find_and_get_object(addr, 1, objflags);
2042 	if (!object)
2043 		return false;
2044 
2045 	raw_spin_lock_irqsave(&object->lock, flags);
2046 	dump_object_info(object);
2047 	raw_spin_unlock_irqrestore(&object->lock, flags);
2048 
2049 	put_object(object);
2050 
2051 	return true;
2052 }
2053 
2054 static int dump_str_object_info(const char *str)
2055 {
2056 	unsigned long addr;
2057 	bool found = false;
2058 
2059 	if (kstrtoul(str, 0, &addr))
2060 		return -EINVAL;
2061 
2062 	found |= __dump_str_object_info(addr, 0);
2063 	found |= __dump_str_object_info(addr, OBJECT_PHYS);
2064 	found |= __dump_str_object_info(addr, OBJECT_PERCPU);
2065 
2066 	if (!found) {
2067 		pr_info("Unknown object at 0x%08lx\n", addr);
2068 		return -EINVAL;
2069 	}
2070 
2071 	return 0;
2072 }
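
/*
 * dump_str_object_info() serves the debugfs "dump=" command below and
 * tries all three object namespaces (virtual, physical, per-cpu) for the
 * given address. Example invocation, with an illustrative address:
 *
 *	# echo dump=0xffff888012345678 > /sys/kernel/debug/kmemleak
 *
 * A miss in all three trees logs "Unknown object" and returns -EINVAL.
 */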
2073 
2074 /*
2075  * We use grey instead of black to ensure we can do future scans on the same
2076  * objects. Black objects are never scanned again, so any references they
2077  * acquire to newly allocated objects would go unseen and we would end up
2078  * with false positives.
2079  */
2080 static void kmemleak_clear(void)
2081 {
2082 	struct kmemleak_object *object;
2083 
2084 	rcu_read_lock();
2085 	list_for_each_entry_rcu(object, &object_list, object_list) {
2086 		raw_spin_lock_irq(&object->lock);
2087 		if ((object->flags & OBJECT_REPORTED) &&
2088 		    unreferenced_object(object))
2089 			__paint_it(object, KMEMLEAK_GREY);
2090 		raw_spin_unlock_irq(&object->lock);
2091 	}
2092 	rcu_read_unlock();
2093 
2094 	kmemleak_found_leaks = false;
2095 }
2096 
2097 static void __kmemleak_do_cleanup(void);
2098 
2099 /*
2100  * File write operation to configure kmemleak at run-time. The following
2101  * commands can be written to the /sys/kernel/debug/kmemleak file:
2102  *   off	- disable kmemleak (irreversible)
2103  *   stack=on	- enable the task stacks scanning
2104  *   stack=off	- disable the task stacks scanning
2105  *   scan=on	- start the automatic memory scanning thread
2106  *   scan=off	- stop the automatic memory scanning thread
2107  *   scan=...	- set the automatic memory scanning period in seconds (0 to
2108  *		  disable it)
2109  *   scan	- trigger a memory scan
2110  *   clear	- mark all currently reported unreferenced kmemleak objects as
2111  *		  grey so they are no longer printed, or free all kmemleak
2112  *		  objects if kmemleak has been disabled.
2113  *   dump=...	- dump information about the object found at the given address
2114  *   dump=...	- dump information about the object found at the given address
 */
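
/*
 * Typical command usage from a root shell (illustrative sketch; only one
 * command is honoured per write, as noted at the end of kmemleak_write()):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	(one-off scan)
 *	# echo scan=600 > /sys/kernel/debug/kmemleak	(rescan every 600s)
 *	# echo clear > /sys/kernel/debug/kmemleak	(mute current reports)
 *	# echo off > /sys/kernel/debug/kmemleak		(disable, irreversible)
 */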
2115 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
2116 			      size_t size, loff_t *ppos)
2117 {
2118 	char buf[64];
2119 	int buf_size;
2120 	int ret;
2121 
2122 	buf_size = min(size, (sizeof(buf) - 1));
2123 	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
2124 		return -EFAULT;
2125 	buf[buf_size] = 0;
2126 
2127 	ret = mutex_lock_interruptible(&scan_mutex);
2128 	if (ret < 0)
2129 		return ret;
2130 
2131 	if (strncmp(buf, "clear", 5) == 0) {
2132 		if (kmemleak_enabled)
2133 			kmemleak_clear();
2134 		else
2135 			__kmemleak_do_cleanup();
2136 		goto out;
2137 	}
2138 
2139 	if (!kmemleak_enabled) {
2140 		ret = -EPERM;
2141 		goto out;
2142 	}
2143 
2144 	if (strncmp(buf, "off", 3) == 0)
2145 		kmemleak_disable();
2146 	else if (strncmp(buf, "stack=on", 8) == 0)
2147 		kmemleak_stack_scan = 1;
2148 	else if (strncmp(buf, "stack=off", 9) == 0)
2149 		kmemleak_stack_scan = 0;
2150 	else if (strncmp(buf, "scan=on", 7) == 0)
2151 		start_scan_thread();
2152 	else if (strncmp(buf, "scan=off", 8) == 0)
2153 		stop_scan_thread();
2154 	else if (strncmp(buf, "scan=", 5) == 0) {
2155 		unsigned secs;
2156 		unsigned long msecs;
2157 
2158 		ret = kstrtouint(buf + 5, 0, &secs);
2159 		if (ret < 0)
2160 			goto out;
2161 
2162 		msecs = secs * MSEC_PER_SEC;
2163 		if (msecs > UINT_MAX)
2164 			msecs = UINT_MAX;
2165 
2166 		stop_scan_thread();
2167 		if (msecs) {
2168 			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
2169 			start_scan_thread();
2170 		}
2171 	} else if (strncmp(buf, "scan", 4) == 0)
2172 		kmemleak_scan();
2173 	else if (strncmp(buf, "dump=", 5) == 0)
2174 		ret = dump_str_object_info(buf + 5);
2175 	else
2176 		ret = -EINVAL;
2177 
2178 out:
2179 	mutex_unlock(&scan_mutex);
2180 	if (ret < 0)
2181 		return ret;
2182 
2183 	/* ignore the rest of the buffer, only one command at a time */
2184 	*ppos += size;
2185 	return size;
2186 }
2187 
2188 static const struct file_operations kmemleak_fops = {
2189 	.owner		= THIS_MODULE,
2190 	.open		= kmemleak_open,
2191 	.read		= seq_read,
2192 	.write		= kmemleak_write,
2193 	.llseek		= seq_lseek,
2194 	.release	= seq_release,
2195 };
2196 
2197 static void __kmemleak_do_cleanup(void)
2198 {
2199 	struct kmemleak_object *object, *tmp;
2200 	unsigned int cnt = 0;
2201 
2202 	/*
2203 	 * Kmemleak has already been disabled, so there is no need for RCU list
2204 	 * traversal or for holding kmemleak_lock.
2205 	 */
2206 	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2207 		__remove_object(object);
2208 		__delete_object(object);
2209 
2210 		/* Call cond_resched() once per 64 iterations to avoid soft lockup */
2211 		if (!(++cnt & 0x3f))
2212 			cond_resched();
2213 	}
2214 }
2215 
2216 /*
2217  * Stop the memory scanning thread and free the kmemleak internal objects
2218  * if no memory leaks were reported (otherwise, kmemleak may still hold
2219  * useful information about the reported leaks).
2220  */
2221 static void kmemleak_do_cleanup(struct work_struct *work)
2222 {
2223 	stop_scan_thread();
2224 
2225 	mutex_lock(&scan_mutex);
2226 	/*
2227 	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
2228 	 * longer track object freeing. Ordering of the scan thread stopping and
2229 	 * the memory accesses below is guaranteed by the kthread_stop()
2230 	 * function.
2231 	 */
2232 	kmemleak_free_enabled = 0;
2233 	mutex_unlock(&scan_mutex);
2234 
2235 	if (!kmemleak_found_leaks)
2236 		__kmemleak_do_cleanup();
2237 	else
2238 		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2239 }
2240 
2241 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2242 
2243 /*
2244  * Disable kmemleak. No memory allocation/freeing will be traced once this
2245  * function is called. Disabling kmemleak is an irreversible operation.
2246  */
2247 static void kmemleak_disable(void)
2248 {
2249 	/* atomically check whether it was already invoked */
2250 	if (cmpxchg(&kmemleak_error, 0, 1))
2251 		return;
2252 
2253 	/* stop any memory operation tracing */
2254 	kmemleak_enabled = 0;
2255 
2256 	/* check whether it is too early for a kernel thread */
2257 	if (kmemleak_late_initialized)
2258 		schedule_work(&cleanup_work);
2259 	else
2260 		kmemleak_free_enabled = 0;
2261 
2262 	pr_info("Kernel memory leak detector disabled\n");
2263 }
2264 
2265 /*
2266  * Allow boot-time kmemleak disabling (enabled by default).
2267  */
2268 static int __init kmemleak_boot_config(char *str)
2269 {
2270 	if (!str)
2271 		return -EINVAL;
2272 	if (strcmp(str, "off") == 0)
2273 		kmemleak_disable();
2274 	else if (strcmp(str, "on") == 0) {
2275 		kmemleak_skip_disable = 1;
2276 		stack_depot_request_early_init();
2277 	} else
2279 		return -EINVAL;
2280 	return 0;
2281 }
2282 early_param("kmemleak", kmemleak_boot_config);
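
/*
 * Example kernel command lines (illustrative):
 *
 *	kmemleak=off	disable kmemleak before any tracing starts
 *	kmemleak=on	override CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF and
 *			request early stack depot initialization
 */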
2283 
2284 /*
2285  * Kmemleak initialization.
2286  */
2287 void __init kmemleak_init(void)
2288 {
2289 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2290 	if (!kmemleak_skip_disable) {
2291 		kmemleak_disable();
2292 		return;
2293 	}
2294 #endif
2295 
2296 	if (kmemleak_error)
2297 		return;
2298 
2299 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2300 	jiffies_scan_wait = secs_to_jiffies(SECS_SCAN_WAIT);
2301 
2302 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2303 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2304 
2305 	/* register the data/bss sections */
2306 	create_object((unsigned long)_sdata, _edata - _sdata,
2307 		      KMEMLEAK_GREY, GFP_ATOMIC);
2308 	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2309 		      KMEMLEAK_GREY, GFP_ATOMIC);
2310 	/* only register .data..ro_after_init if not within .data */
2311 	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2312 		create_object((unsigned long)__start_ro_after_init,
2313 			      __end_ro_after_init - __start_ro_after_init,
2314 			      KMEMLEAK_GREY, GFP_ATOMIC);
2315 }
2316 
2317 /*
2318  * Late initialization function.
2319  */
2320 static int __init kmemleak_late_init(void)
2321 {
2322 	kmemleak_late_initialized = 1;
2323 
2324 	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2325 
2326 	if (kmemleak_error) {
2327 		/*
2328 		 * Some error occurred and kmemleak was disabled. There is a
2329 		 * small chance that kmemleak_disable() was called immediately
2330 		 * after setting kmemleak_late_initialized and we may end up with
2331 		 * two clean-up threads, though they are serialized by scan_mutex.
2332 		 */
2333 		schedule_work(&cleanup_work);
2334 		return -ENOMEM;
2335 	}
2336 
2337 	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2338 		mutex_lock(&scan_mutex);
2339 		start_scan_thread();
2340 		mutex_unlock(&scan_mutex);
2341 	}
2342 
2343 	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2344 		mem_pool_free_count);
2345 
2346 	return 0;
2347 }
2348 late_initcall(kmemleak_late_init);
2349