1 /*
2  * mm/kmemleak.c
3  *
4  * Copyright (C) 2008 ARM Limited
5  * Written by Catalin Marinas <catalin.marinas@arm.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  *
21  * For more information on the algorithm and kmemleak usage, please see
22  * Documentation/kmemleak.txt.
23  *
24  * Notes on locking
25  * ----------------
26  *
27  * The following locks and mutexes are used by kmemleak:
28  *
29  * - kmemleak_lock (rwlock): protects the object_list modifications and
30  *   accesses to the object_tree_root. The object_list is the main list
31  *   holding the metadata (struct kmemleak_object) for the allocated memory
32  *   blocks. The object_tree_root is a priority search tree used to look-up
33  *   blocks. The object_tree_root is a priority search tree used to look up
34  *   kmemleak_object structures are added to the object_list and
35  *   object_tree_root in the create_object() function called from the
36  *   kmemleak_alloc() callback and removed in delete_object() called from the
37  *   kmemleak_free() callback
38  * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
39  *   the metadata (e.g. count) are protected by this lock. Note that some
40  *   members of this structure may be protected by other means (atomic or
41  *   kmemleak_lock). This lock is also held when scanning the corresponding
42  *   memory block to avoid the kernel freeing it via the kmemleak_free()
43  *   callback. This is less heavyweight than holding a global lock like
44  *   kmemleak_lock during scanning
45  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
46  *   unreferenced objects at a time. The gray_list contains the objects which
47  *   are already referenced or marked as false positives and need to be
48  *   scanned. This list is only modified during a scanning episode when the
49  *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
50  *   Note that the kmemleak_object.use_count is incremented when an object is
51  *   added to the gray_list and therefore cannot be freed. This mutex also
52  *   prevents multiple users of the "kmemleak" debugfs file, as well as
53  *   concurrent modifications to the memory scanning parameters including the
54  *   scan_thread pointer
55  *
56  * The kmemleak_object structures have a use_count incremented or decremented
57  * using the get_object()/put_object() functions. When the use_count becomes
58  * 0, this count can no longer be incremented and put_object() schedules the
59  * kmemleak_object freeing via an RCU callback. All calls to the get_object()
60  * function must be protected by rcu_read_lock() to avoid accessing a freed
61  * structure.
62  */
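/*
 * Illustrative sketch of the lifetime rules above, condensed from the
 * find_and_get_object()/put_object() pattern used throughout this file:
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	(kmemleak_lock held)
 *	if (object && !get_object(object))	(use_count was already 0)
 *		object = NULL;
 *	rcu_read_unlock();
 *	... access the object, usually under object->lock ...
 *	put_object(object);			(may schedule the RCU freeing)
 */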
63 
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 
66 #include <linux/init.h>
67 #include <linux/kernel.h>
68 #include <linux/list.h>
69 #include <linux/sched.h>
70 #include <linux/jiffies.h>
71 #include <linux/delay.h>
72 #include <linux/module.h>
73 #include <linux/kthread.h>
74 #include <linux/prio_tree.h>
75 #include <linux/gfp.h>
76 #include <linux/fs.h>
77 #include <linux/debugfs.h>
78 #include <linux/seq_file.h>
79 #include <linux/cpumask.h>
80 #include <linux/spinlock.h>
81 #include <linux/mutex.h>
82 #include <linux/rcupdate.h>
83 #include <linux/stacktrace.h>
84 #include <linux/cache.h>
85 #include <linux/percpu.h>
86 #include <linux/hardirq.h>
87 #include <linux/mmzone.h>
88 #include <linux/slab.h>
89 #include <linux/thread_info.h>
90 #include <linux/err.h>
91 #include <linux/uaccess.h>
92 #include <linux/string.h>
93 #include <linux/nodemask.h>
94 #include <linux/mm.h>
95 
96 #include <asm/sections.h>
97 #include <asm/processor.h>
98 #include <asm/atomic.h>
99 
100 #include <linux/kmemleak.h>
101 
102 /*
103  * Kmemleak configuration and common defines.
104  */
105 #define MAX_TRACE		16	/* stack trace length */
106 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
107 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
108 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
109 #define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */
110 
111 #define BYTES_PER_POINTER	sizeof(void *)
112 
113 /* GFP bitmask for kmemleak internal allocations */
114 #define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)
115 
116 /* scanning area inside a memory block */
117 struct kmemleak_scan_area {
118 	struct hlist_node node;
119 	unsigned long offset;
120 	size_t length;
121 };
122 
123 /*
124  * Structure holding the metadata for each allocated memory block.
125  * Modifications to such objects should be made while holding the
126  * object->lock. Insertions or deletions from object_list, gray_list or
127  * tree_node are already protected by the corresponding locks or mutex (see
128  * the notes on locking above). These objects are reference-counted
129  * (use_count) and freed using the RCU mechanism.
130  */
131 struct kmemleak_object {
132 	spinlock_t lock;
133 	unsigned long flags;		/* object status flags */
134 	struct list_head object_list;
135 	struct list_head gray_list;
136 	struct prio_tree_node tree_node;
137 	struct rcu_head rcu;		/* object_list lockless traversal */
138 	/* object usage count; object freed when use_count == 0 */
139 	atomic_t use_count;
140 	unsigned long pointer;
141 	size_t size;
142 	/* minimum number of pointers found before it is considered a leak */
143 	int min_count;
144 	/* the total number of pointers found pointing to this object */
145 	int count;
146 	/* memory ranges to be scanned inside an object (empty for all) */
147 	struct hlist_head area_list;
148 	unsigned long trace[MAX_TRACE];
149 	unsigned int trace_len;
150 	unsigned long jiffies;		/* creation timestamp */
151 	pid_t pid;			/* pid of the current task */
152 	char comm[TASK_COMM_LEN];	/* executable name */
153 };
154 
155 /* flag representing the memory block allocation status */
156 #define OBJECT_ALLOCATED	(1 << 0)
157 /* flag set after the first reporting of an unreferenced object */
158 #define OBJECT_REPORTED		(1 << 1)
159 /* flag set to not scan the object */
160 #define OBJECT_NO_SCAN		(1 << 2)
161 /* flag set on newly allocated objects */
162 #define OBJECT_NEW		(1 << 3)
163 
164 /* the list of all allocated objects */
165 static LIST_HEAD(object_list);
166 /* the list of gray-colored objects (see color_gray comment below) */
167 static LIST_HEAD(gray_list);
168 /* prio search tree for object boundaries */
169 static struct prio_tree_root object_tree_root;
170 /* rw_lock protecting the access to object_list and object_tree_root */
171 static DEFINE_RWLOCK(kmemleak_lock);
172 
173 /* allocation caches for kmemleak internal data */
174 static struct kmem_cache *object_cache;
175 static struct kmem_cache *scan_area_cache;
176 
177 /* set if tracing memory operations is enabled */
178 static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
179 /* set in the late_initcall if there were no errors */
180 static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
181 /* enables or disables early logging of the memory operations */
182 static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
183 /* set if a fatal kmemleak error has occurred */
184 static atomic_t kmemleak_error = ATOMIC_INIT(0);
185 
186 /* minimum and maximum addresses that may be valid pointers */
187 static unsigned long min_addr = ULONG_MAX;
188 static unsigned long max_addr;
189 
190 static struct task_struct *scan_thread;
191 /* used to avoid reporting of recently allocated objects */
192 static unsigned long jiffies_min_age;
193 static unsigned long jiffies_last_scan;
194 /* delay between automatic memory scans */
195 static signed long jiffies_scan_wait;
196 /* enables or disables the task stacks scanning */
197 static int kmemleak_stack_scan = 1;
198 /* protects the memory scanning, parameters and debug/kmemleak file access */
199 static DEFINE_MUTEX(scan_mutex);
200 
201 /*
202  * Early object allocation/freeing logging. Kmemleak is initialized after the
203  * kernel allocator. However, both the kernel allocator and kmemleak may
204  * allocate memory blocks which need to be tracked. Kmemleak defines a static
205  * early_log buffer to hold the allocation/freeing information before it is
206  * fully initialized.
207  */
208 
209 /* kmemleak operation type for early logging */
210 enum {
211 	KMEMLEAK_ALLOC,
212 	KMEMLEAK_FREE,
213 	KMEMLEAK_FREE_PART,
214 	KMEMLEAK_NOT_LEAK,
215 	KMEMLEAK_IGNORE,
216 	KMEMLEAK_SCAN_AREA,
217 	KMEMLEAK_NO_SCAN
218 };
219 
220 /*
221  * Structure holding the information passed to kmemleak callbacks during the
222  * early logging.
223  */
224 struct early_log {
225 	int op_type;			/* kmemleak operation type */
226 	const void *ptr;		/* allocated/freed memory block */
227 	size_t size;			/* memory block size */
228 	int min_count;			/* minimum reference count */
229 	unsigned long offset;		/* scan area offset */
230 	size_t length;			/* scan area length */
231 };
232 
233 /* early logging buffer and current position */
234 static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
235 static int crt_early_log;
236 
237 static void kmemleak_disable(void);
238 
239 /*
240  * Print a warning and dump the stack trace.
241  */
242 #define kmemleak_warn(x...)	do {	\
243 	pr_warning(x);			\
244 	dump_stack();			\
245 } while (0)
246 
247 /*
248  * Macro invoked when a serious kmemleak condition has occurred and cannot be
249  * recovered from. Kmemleak will be disabled and further allocation/freeing
250  * tracing will no longer be available.
251  */
252 #define kmemleak_stop(x...)	do {	\
253 	kmemleak_warn(x);		\
254 	kmemleak_disable();		\
255 } while (0)
256 
257 /*
258  * Object colors, encoded with count and min_count:
259  * - white - orphan object, not enough references to it (count < min_count)
260  * - gray  - not orphan, not marked as false positive (min_count == 0) or
261  *		sufficient references to it (count >= min_count)
262  * - black - ignore, it doesn't contain references (e.g. text section)
263  *		(min_count == -1).
264  * Newly created objects don't have any color assigned (object->count == -1)
265  * before the next memory scan when they become white.
266  */
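/*
 * A concrete example of the coloring above: a block allocated with kmalloc()
 * is registered with min_count 1. If the next scan finds two pointers to it,
 * count becomes 2 >= 1 and the object turns gray; if it finds none, count
 * stays 0 < 1 and the object is white, i.e. a leak candidate reported once
 * it is old enough.
 */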
267 static int color_white(const struct kmemleak_object *object)
268 {
269 	return object->count != -1 && object->count < object->min_count;
270 }
271 
272 static int color_gray(const struct kmemleak_object *object)
273 {
274 	return object->min_count != -1 && object->count >= object->min_count;
275 }
276 
277 static int color_black(const struct kmemleak_object *object)
278 {
279 	return object->min_count == -1;
280 }
281 
282 /*
283  * Objects are considered unreferenced only if their color is white, they have
284  * not been deleted and have a minimum age to avoid false positives caused by
285  * pointers temporarily stored in CPU registers.
286  */
287 static int unreferenced_object(struct kmemleak_object *object)
288 {
289 	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
290 		time_before_eq(object->jiffies + jiffies_min_age,
291 			       jiffies_last_scan);
292 }
293 
294 /*
295  * Print the unreferenced object's information to the seq file. The
296  * print_unreferenced function must be called with the object->lock held.
297  */
298 static void print_unreferenced(struct seq_file *seq,
299 			       struct kmemleak_object *object)
300 {
301 	int i;
302 
303 	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
304 		   object->pointer, object->size);
305 	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
306 		   object->comm, object->pid, object->jiffies);
307 	seq_printf(seq, "  backtrace:\n");
308 
309 	for (i = 0; i < object->trace_len; i++) {
310 		void *ptr = (void *)object->trace[i];
311 		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
312 	}
313 }
314 
315 /*
316  * Print the kmemleak_object information. This function is used mainly for
317  * debugging special cases in kmemleak operations. It must be called with
318  * the object->lock held.
319  */
320 static void dump_object_info(struct kmemleak_object *object)
321 {
322 	struct stack_trace trace;
323 
324 	trace.nr_entries = object->trace_len;
325 	trace.entries = object->trace;
326 
327 	pr_notice("Object 0x%08lx (size %zu):\n",
328 		  object->tree_node.start, object->size);
329 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
330 		  object->comm, object->pid, object->jiffies);
331 	pr_notice("  min_count = %d\n", object->min_count);
332 	pr_notice("  count = %d\n", object->count);
333 	pr_notice("  backtrace:\n");
334 	print_stack_trace(&trace, 4);
335 }
336 
337 /*
338  * Look up a memory block's metadata (kmemleak_object) in the priority search
339  * tree based on a pointer value. If alias is 0, only values pointing to the
340  * beginning of the memory block are allowed. The kmemleak_lock must be held
341  * when calling this function.
342  */
343 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
344 {
345 	struct prio_tree_node *node;
346 	struct prio_tree_iter iter;
347 	struct kmemleak_object *object;
348 
349 	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
350 	node = prio_tree_next(&iter);
351 	if (node) {
352 		object = prio_tree_entry(node, struct kmemleak_object,
353 					 tree_node);
354 		if (!alias && object->pointer != ptr) {
355 			kmemleak_warn("Found object by alias\n");
356 			object = NULL;
357 		}
358 	} else
359 		object = NULL;
360 
361 	return object;
362 }
363 
364 /*
365  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
366  * that once an object's use_count has reached 0, the RCU freeing has already
367  * been registered and the object should no longer be used. This function must be
368  * called under the protection of rcu_read_lock().
369  */
370 static int get_object(struct kmemleak_object *object)
371 {
372 	return atomic_inc_not_zero(&object->use_count);
373 }
374 
375 /*
376  * RCU callback to free a kmemleak_object.
377  */
378 static void free_object_rcu(struct rcu_head *rcu)
379 {
380 	struct hlist_node *elem, *tmp;
381 	struct kmemleak_scan_area *area;
382 	struct kmemleak_object *object =
383 		container_of(rcu, struct kmemleak_object, rcu);
384 
385 	/*
386 	 * Once use_count is 0 (guaranteed by put_object), there is no other
387 	 * code accessing this object, hence no need for locking.
388 	 */
389 	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
390 		hlist_del(elem);
391 		kmem_cache_free(scan_area_cache, area);
392 	}
393 	kmem_cache_free(object_cache, object);
394 }
395 
396 /*
397  * Decrement the object use_count. Once the count is 0, free the object using
398  * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
399  * delete_object() path, the delayed RCU freeing ensures that there is no
400  * recursive call to the kernel allocator. Lock-less RCU object_list traversal
401  * is also possible.
402  */
403 static void put_object(struct kmemleak_object *object)
404 {
405 	if (!atomic_dec_and_test(&object->use_count))
406 		return;
407 
408 	/* should only get here after delete_object was called */
409 	WARN_ON(object->flags & OBJECT_ALLOCATED);
410 
411 	call_rcu(&object->rcu, free_object_rcu);
412 }
413 
414 /*
415  * Look up an object in the prio search tree and increase its use_count.
416  */
417 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
418 {
419 	unsigned long flags;
420 	struct kmemleak_object *object = NULL;
421 
422 	rcu_read_lock();
423 	read_lock_irqsave(&kmemleak_lock, flags);
424 	if (ptr >= min_addr && ptr < max_addr)
425 		object = lookup_object(ptr, alias);
426 	read_unlock_irqrestore(&kmemleak_lock, flags);
427 
428 	/* check whether the object is still available */
429 	if (object && !get_object(object))
430 		object = NULL;
431 	rcu_read_unlock();
432 
433 	return object;
434 }
435 
436 /*
437  * Create the metadata (struct kmemleak_object) corresponding to an allocated
438  * memory block and add it to the object_list and object_tree_root.
439  */
440 static void create_object(unsigned long ptr, size_t size, int min_count,
441 			  gfp_t gfp)
442 {
443 	unsigned long flags;
444 	struct kmemleak_object *object;
445 	struct prio_tree_node *node;
446 	struct stack_trace trace;
447 
448 	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
449 	if (!object) {
450 		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
451 		return;
452 	}
453 
454 	INIT_LIST_HEAD(&object->object_list);
455 	INIT_LIST_HEAD(&object->gray_list);
456 	INIT_HLIST_HEAD(&object->area_list);
457 	spin_lock_init(&object->lock);
458 	atomic_set(&object->use_count, 1);
459 	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
460 	object->pointer = ptr;
461 	object->size = size;
462 	object->min_count = min_count;
463 	object->count = -1;			/* no color initially */
464 	object->jiffies = jiffies;
465 
466 	/* task information */
467 	if (in_irq()) {
468 		object->pid = 0;
469 		strncpy(object->comm, "hardirq", sizeof(object->comm));
470 	} else if (in_softirq()) {
471 		object->pid = 0;
472 		strncpy(object->comm, "softirq", sizeof(object->comm));
473 	} else {
474 		object->pid = current->pid;
475 		/*
476 		 * There is a small chance of a race with set_task_comm(),
477 		 * however using get_task_comm() here may cause locking
478 		 * dependency issues with current->alloc_lock. In the worst
479 		 * case, the command line is not correct.
480 		 */
481 		strncpy(object->comm, current->comm, sizeof(object->comm));
482 	}
483 
484 	/* kernel backtrace */
485 	trace.max_entries = MAX_TRACE;
486 	trace.nr_entries = 0;
487 	trace.entries = object->trace;
488 	trace.skip = 1;
489 	save_stack_trace(&trace);
490 	object->trace_len = trace.nr_entries;
491 
492 	INIT_PRIO_TREE_NODE(&object->tree_node);
493 	object->tree_node.start = ptr;
494 	object->tree_node.last = ptr + size - 1;
495 
496 	write_lock_irqsave(&kmemleak_lock, flags);
497 	min_addr = min(min_addr, ptr);
498 	max_addr = max(max_addr, ptr + size);
499 	node = prio_tree_insert(&object_tree_root, &object->tree_node);
500 	/*
501 	 * The code calling the kernel does not yet have the pointer to the
502 	 * memory block to be able to free it.  However, we still hold the
503 	 * kmemleak_lock here in case parts of the kernel started freeing
504 	 * random memory blocks.
505 	 */
506 	if (node != &object->tree_node) {
507 		unsigned long flags;
508 
509 		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
510 			      "(already existing)\n", ptr);
511 		object = lookup_object(ptr, 1);
512 		spin_lock_irqsave(&object->lock, flags);
513 		dump_object_info(object);
514 		spin_unlock_irqrestore(&object->lock, flags);
515 
516 		goto out;
517 	}
518 	list_add_tail_rcu(&object->object_list, &object_list);
519 out:
520 	write_unlock_irqrestore(&kmemleak_lock, flags);
521 }
522 
523 /*
524  * Remove the metadata (struct kmemleak_object) for a memory block from the
525  * object_list and object_tree_root and decrement its use_count.
526  */
527 static void __delete_object(struct kmemleak_object *object)
528 {
529 	unsigned long flags;
530 
531 	write_lock_irqsave(&kmemleak_lock, flags);
532 	prio_tree_remove(&object_tree_root, &object->tree_node);
533 	list_del_rcu(&object->object_list);
534 	write_unlock_irqrestore(&kmemleak_lock, flags);
535 
536 	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
537 	WARN_ON(atomic_read(&object->use_count) < 2);
538 
539 	/*
540 	 * Locking here also ensures that the corresponding memory block
541 	 * cannot be freed when it is being scanned.
542 	 */
543 	spin_lock_irqsave(&object->lock, flags);
544 	object->flags &= ~OBJECT_ALLOCATED;
545 	spin_unlock_irqrestore(&object->lock, flags);
546 	put_object(object);
547 }
548 
549 /*
550  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
551  * delete it.
552  */
553 static void delete_object_full(unsigned long ptr)
554 {
555 	struct kmemleak_object *object;
556 
557 	object = find_and_get_object(ptr, 0);
558 	if (!object) {
559 #ifdef DEBUG
560 		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
561 			      ptr);
562 #endif
563 		return;
564 	}
565 	__delete_object(object);
566 	put_object(object);
567 }
568 
569 /*
570  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
571  * delete it. If the memory block is partially freed, the function may create
572  * additional metadata for the remaining parts of the block.
573  */
574 static void delete_object_part(unsigned long ptr, size_t size)
575 {
576 	struct kmemleak_object *object;
577 	unsigned long start, end;
578 
579 	object = find_and_get_object(ptr, 1);
580 	if (!object) {
581 #ifdef DEBUG
582 		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
583 			      "(size %zu)\n", ptr, size);
584 #endif
585 		return;
586 	}
587 	__delete_object(object);
588 
589 	/*
590 	 * Create one or two objects that may result from the memory block
591 	 * split. Note that partial freeing is only done by free_bootmem() and
592 	 * this happens before kmemleak_init() is called. The path below is
593 	 * only executed during early log recording in kmemleak_init(), so
594 	 * GFP_KERNEL is enough.
595 	 */
596 	start = object->pointer;
597 	end = object->pointer + object->size;
598 	if (ptr > start)
599 		create_object(start, ptr - start, object->min_count,
600 			      GFP_KERNEL);
601 	if (ptr + size < end)
602 		create_object(ptr + size, end - ptr - size, object->min_count,
603 			      GFP_KERNEL);
604 
605 	put_object(object);
606 }
607 /*
608  * Mark an object permanently gray-colored so that it can no longer be
609  * reported as a leak. This is generally used to mark false positives.
610  */
611 static void make_gray_object(unsigned long ptr)
612 {
613 	unsigned long flags;
614 	struct kmemleak_object *object;
615 
616 	object = find_and_get_object(ptr, 0);
617 	if (!object) {
618 		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
619 		return;
620 	}
621 
622 	spin_lock_irqsave(&object->lock, flags);
623 	object->min_count = 0;
624 	spin_unlock_irqrestore(&object->lock, flags);
625 	put_object(object);
626 }
627 
628 /*
629  * Mark the object as black-colored so that it is excluded from scanning and
630  * reporting.
631  */
632 static void make_black_object(unsigned long ptr)
633 {
634 	unsigned long flags;
635 	struct kmemleak_object *object;
636 
637 	object = find_and_get_object(ptr, 0);
638 	if (!object) {
639 		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
640 		return;
641 	}
642 
643 	spin_lock_irqsave(&object->lock, flags);
644 	object->min_count = -1;
645 	spin_unlock_irqrestore(&object->lock, flags);
646 	put_object(object);
647 }
648 
649 /*
650  * Add a scanning area to the object. If at least one such area is added,
651  * kmemleak will only scan these ranges rather than the whole memory block.
652  */
653 static void add_scan_area(unsigned long ptr, unsigned long offset,
654 			  size_t length, gfp_t gfp)
655 {
656 	unsigned long flags;
657 	struct kmemleak_object *object;
658 	struct kmemleak_scan_area *area;
659 
660 	object = find_and_get_object(ptr, 0);
661 	if (!object) {
662 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
663 			      ptr);
664 		return;
665 	}
666 
667 	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
668 	if (!area) {
669 		kmemleak_warn("Cannot allocate a scan area\n");
670 		goto out;
671 	}
672 
673 	spin_lock_irqsave(&object->lock, flags);
674 	if (offset + length > object->size) {
675 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
676 		dump_object_info(object);
677 		kmem_cache_free(scan_area_cache, area);
678 		goto out_unlock;
679 	}
680 
681 	INIT_HLIST_NODE(&area->node);
682 	area->offset = offset;
683 	area->length = length;
684 
685 	hlist_add_head(&area->node, &object->area_list);
686 out_unlock:
687 	spin_unlock_irqrestore(&object->lock, flags);
688 out:
689 	put_object(object);
690 }
691 
692 /*
693  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
694  * pointer. Such an object will not be scanned by kmemleak but references to
695  * it are still searched for.
696  */
697 static void object_no_scan(unsigned long ptr)
698 {
699 	unsigned long flags;
700 	struct kmemleak_object *object;
701 
702 	object = find_and_get_object(ptr, 0);
703 	if (!object) {
704 		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
705 		return;
706 	}
707 
708 	spin_lock_irqsave(&object->lock, flags);
709 	object->flags |= OBJECT_NO_SCAN;
710 	spin_unlock_irqrestore(&object->lock, flags);
711 	put_object(object);
712 }
713 
714 /*
715  * Log an early kmemleak_* call to the early_log buffer. These calls will be
716  * processed later once kmemleak is fully initialized.
717  */
718 static void log_early(int op_type, const void *ptr, size_t size,
719 		      int min_count, unsigned long offset, size_t length)
720 {
721 	unsigned long flags;
722 	struct early_log *log;
723 
724 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
725 		pr_warning("Early log buffer exceeded\n");
726 		kmemleak_disable();
727 		return;
728 	}
729 
730 	/*
731 	 * There is no need for locking since the kernel is still in UP mode
732 	 * at this stage. Disabling the IRQs is enough.
733 	 */
734 	local_irq_save(flags);
735 	log = &early_log[crt_early_log];
736 	log->op_type = op_type;
737 	log->ptr = ptr;
738 	log->size = size;
739 	log->min_count = min_count;
740 	log->offset = offset;
741 	log->length = length;
742 	crt_early_log++;
743 	local_irq_restore(flags);
744 }
745 
746 /*
747  * Memory allocation function callback. This function is called from the
748  * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
749  * vmalloc etc.).
750  */
751 void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
752 {
753 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
754 
755 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
756 		create_object((unsigned long)ptr, size, min_count, gfp);
757 	else if (atomic_read(&kmemleak_early_log))
758 		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
759 }
760 EXPORT_SYMBOL_GPL(kmemleak_alloc);
761 
762 /*
763  * Memory freeing function callback. This function is called from the kernel
764  * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
765  */
766 void kmemleak_free(const void *ptr)
767 {
768 	pr_debug("%s(0x%p)\n", __func__, ptr);
769 
770 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
771 		delete_object_full((unsigned long)ptr);
772 	else if (atomic_read(&kmemleak_early_log))
773 		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
774 }
775 EXPORT_SYMBOL_GPL(kmemleak_free);
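/*
 * Illustrative sketch (hypothetical code, kept out of the build): a custom
 * allocator that bypasses the slab/vmalloc hooks would pair the two callbacks
 * above so that its blocks stay tracked. The example_pool_* names are made up
 * for this example.
 */
#if 0
static void *example_pool_alloc(size_t size, gfp_t gfp)
{
	void *ptr = (void *)__get_free_pages(gfp, get_order(size));

	/* min_count of 1: report the block if no references to it remain */
	if (ptr)
		kmemleak_alloc(ptr, size, 1, gfp);
	return ptr;
}

static void example_pool_free(void *ptr, size_t size)
{
	kmemleak_free(ptr);
	free_pages((unsigned long)ptr, get_order(size));
}
#endif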
776 
777 /*
778  * Partial memory freeing function callback. This function is usually called
779  * from the bootmem allocator when (part of) a memory block is freed.
780  */
781 void kmemleak_free_part(const void *ptr, size_t size)
782 {
783 	pr_debug("%s(0x%p)\n", __func__, ptr);
784 
785 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
786 		delete_object_part((unsigned long)ptr, size);
787 	else if (atomic_read(&kmemleak_early_log))
788 		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
789 }
790 EXPORT_SYMBOL_GPL(kmemleak_free_part);
791 
792 /*
793  * Mark an already allocated memory block as a false positive. This will cause
794  * the block to no longer be reported as a leak and to always be scanned.
795  */
796 void kmemleak_not_leak(const void *ptr)
797 {
798 	pr_debug("%s(0x%p)\n", __func__, ptr);
799 
800 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
801 		make_gray_object((unsigned long)ptr);
802 	else if (atomic_read(&kmemleak_early_log))
803 		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
804 }
805 EXPORT_SYMBOL(kmemleak_not_leak);
806 
807 /*
808  * Ignore a memory block. This is usually done when it is known that the
809  * corresponding block is not a leak and does not contain any references to
810  * other allocated memory blocks.
811  */
812 void kmemleak_ignore(const void *ptr)
813 {
814 	pr_debug("%s(0x%p)\n", __func__, ptr);
815 
816 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
817 		make_black_object((unsigned long)ptr);
818 	else if (atomic_read(&kmemleak_early_log))
819 		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
820 }
821 EXPORT_SYMBOL(kmemleak_ignore);
822 
823 /*
824  * Limit the range to be scanned in an allocated memory block.
825  */
826 void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
827 			gfp_t gfp)
828 {
829 	pr_debug("%s(0x%p)\n", __func__, ptr);
830 
831 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
832 		add_scan_area((unsigned long)ptr, offset, length, gfp);
833 	else if (atomic_read(&kmemleak_early_log))
834 		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
835 }
836 EXPORT_SYMBOL(kmemleak_scan_area);
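/*
 * Illustrative sketch (hypothetical structure and function): restrict the
 * scanning of a large, mostly pointer-free buffer to the sub-range that
 * actually holds pointers, so that random payload bytes are not misread as
 * references.
 */
#if 0
struct example_buf {
	char payload[4096];		/* pointer-free data */
	void *refs[16];			/* the only words worth scanning */
};

static void example_limit_scan(struct example_buf *buf)
{
	kmemleak_scan_area(buf, offsetof(struct example_buf, refs),
			   sizeof(buf->refs), GFP_KERNEL);
}
#endif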
837 
838 /*
839  * Inform kmemleak not to scan the given memory block.
840  */
841 void kmemleak_no_scan(const void *ptr)
842 {
843 	pr_debug("%s(0x%p)\n", __func__, ptr);
844 
845 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
846 		object_no_scan((unsigned long)ptr);
847 	else if (atomic_read(&kmemleak_early_log))
848 		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
849 }
850 EXPORT_SYMBOL(kmemleak_no_scan);
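/*
 * Illustrative sketch (hypothetical code): how the annotation callbacks above
 * differ for a block known not to be a real leak. In practice only one of
 * them would be used for a given object.
 */
#if 0
static void example_annotations(void *obj)
{
	kmemleak_not_leak(obj);	/* gray: never reported, still scanned */
	kmemleak_ignore(obj);	/* black: never reported, contents not scanned */
	kmemleak_no_scan(obj);	/* contents not scanned, but references to it
				   are still counted */
}
#endif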
851 
852 /*
853  * Memory scanning is a long process and it needs to be interruptible. This
854  * function checks whether such an interrupt condition has occurred.
855  */
856 static int scan_should_stop(void)
857 {
858 	if (!atomic_read(&kmemleak_enabled))
859 		return 1;
860 
861 	/*
862 	 * This function may be called from either process or kthread context,
863 	 * hence the need to check for both stop conditions.
864 	 */
865 	if (current->mm)
866 		return signal_pending(current);
867 	else
868 		return kthread_should_stop();
869 
870 	return 0;
871 }
872 
873 /*
874  * Scan a memory block (exclusive range) for valid pointers and add those
875  * found to the gray list.
876  */
877 static void scan_block(void *_start, void *_end,
878 		       struct kmemleak_object *scanned, int allow_resched)
879 {
880 	unsigned long *ptr;
881 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
882 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
883 
884 	for (ptr = start; ptr < end; ptr++) {
885 		unsigned long flags;
886 		unsigned long pointer = *ptr;
887 		struct kmemleak_object *object;
888 
889 		if (allow_resched)
890 			cond_resched();
891 		if (scan_should_stop())
892 			break;
893 
894 		object = find_and_get_object(pointer, 1);
895 		if (!object)
896 			continue;
897 		if (object == scanned) {
898 			/* self referenced, ignore */
899 			put_object(object);
900 			continue;
901 		}
902 
903 		/*
904 		 * Avoid the lockdep recursive warning on object->lock being
905 		 * previously acquired in scan_object(). These locks are
906 		 * enclosed by scan_mutex.
907 		 */
908 		spin_lock_irqsave_nested(&object->lock, flags,
909 					 SINGLE_DEPTH_NESTING);
910 		if (!color_white(object)) {
911 			/* non-orphan, ignored or new */
912 			spin_unlock_irqrestore(&object->lock, flags);
913 			put_object(object);
914 			continue;
915 		}
916 
917 		/*
918 		 * Increase the object's reference count (number of pointers
919 		 * to the memory block). If this count reaches the required
920 		 * minimum, the object's color will become gray and it will be
921 		 * added to the gray_list.
922 		 */
923 		object->count++;
924 		if (color_gray(object))
925 			list_add_tail(&object->gray_list, &gray_list);
926 		else
927 			put_object(object);
928 		spin_unlock_irqrestore(&object->lock, flags);
929 	}
930 }
931 
932 /*
933  * Scan a memory block corresponding to a kmemleak_object. The caller must
934  * ensure that object->use_count >= 1.
935  */
936 static void scan_object(struct kmemleak_object *object)
937 {
938 	struct kmemleak_scan_area *area;
939 	struct hlist_node *elem;
940 	unsigned long flags;
941 
942 	/*
943 	 * Once the object->lock is acquired, the corresponding memory block
944 	 * cannot be freed (the same lock is acquired in delete_object).
945 	 */
946 	spin_lock_irqsave(&object->lock, flags);
947 	if (object->flags & OBJECT_NO_SCAN)
948 		goto out;
949 	if (!(object->flags & OBJECT_ALLOCATED))
950 		/* already freed object */
951 		goto out;
952 	if (hlist_empty(&object->area_list))
953 		scan_block((void *)object->pointer,
954 			   (void *)(object->pointer + object->size), object, 0);
955 	else
956 		hlist_for_each_entry(area, elem, &object->area_list, node)
957 			scan_block((void *)(object->pointer + area->offset),
958 				   (void *)(object->pointer + area->offset
959 					    + area->length), object, 0);
960 out:
961 	spin_unlock_irqrestore(&object->lock, flags);
962 }
963 
964 /*
965  * Scan data sections and all the referenced memory blocks allocated via the
966  * kernel's standard allocators. This function must be called with the
967  * scan_mutex held.
968  */
969 static void kmemleak_scan(void)
970 {
971 	unsigned long flags;
972 	struct kmemleak_object *object, *tmp;
973 	struct task_struct *task;
974 	int i;
975 	int new_leaks = 0;
976 	int gray_list_pass = 0;
977 
978 	jiffies_last_scan = jiffies;
979 
980 	/* prepare the kmemleak_object structures */
981 	rcu_read_lock();
982 	list_for_each_entry_rcu(object, &object_list, object_list) {
983 		spin_lock_irqsave(&object->lock, flags);
984 #ifdef DEBUG
985 		/*
986 		 * With a few exceptions there should be a maximum of
987 		 * 1 reference to any object at this point.
988 		 */
989 		if (atomic_read(&object->use_count) > 1) {
990 			pr_debug("object->use_count = %d\n",
991 				 atomic_read(&object->use_count));
992 			dump_object_info(object);
993 		}
994 #endif
995 		/* reset the reference count (whiten the object) */
996 		object->count = 0;
997 		object->flags &= ~OBJECT_NEW;
998 		if (color_gray(object) && get_object(object))
999 			list_add_tail(&object->gray_list, &gray_list);
1000 
1001 		spin_unlock_irqrestore(&object->lock, flags);
1002 	}
1003 	rcu_read_unlock();
1004 
1005 	/* data/bss scanning */
1006 	scan_block(_sdata, _edata, NULL, 1);
1007 	scan_block(__bss_start, __bss_stop, NULL, 1);
1008 
1009 #ifdef CONFIG_SMP
1010 	/* per-cpu sections scanning */
1011 	for_each_possible_cpu(i)
1012 		scan_block(__per_cpu_start + per_cpu_offset(i),
1013 			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
1014 #endif
1015 
1016 	/*
1017 	 * Struct page scanning for each node. The code below is not yet safe
1018 	 * with MEMORY_HOTPLUG.
1019 	 */
1020 	for_each_online_node(i) {
1021 		pg_data_t *pgdat = NODE_DATA(i);
1022 		unsigned long start_pfn = pgdat->node_start_pfn;
1023 		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1024 		unsigned long pfn;
1025 
1026 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1027 			struct page *page;
1028 
1029 			if (!pfn_valid(pfn))
1030 				continue;
1031 			page = pfn_to_page(pfn);
1032 			/* only scan if page is in use */
1033 			if (page_count(page) == 0)
1034 				continue;
1035 			scan_block(page, page + 1, NULL, 1);
1036 		}
1037 	}
1038 
1039 	/*
1040 	 * Scanning the task stacks may introduce false negatives and it is
1041 	 * not enabled by default.
1042 	 */
1043 	if (kmemleak_stack_scan) {
1044 		read_lock(&tasklist_lock);
1045 		for_each_process(task)
1046 			scan_block(task_stack_page(task),
1047 				   task_stack_page(task) + THREAD_SIZE,
1048 				   NULL, 0);
1049 		read_unlock(&tasklist_lock);
1050 	}
1051 
1052 	/*
1053 	 * Scan the objects already referenced from the sections scanned
1054 	 * above. More objects will be referenced and, if there are no memory
1055 	 * leaks, all the objects will be scanned. The list traversal is safe
1056 	 * for both tail additions and removals from inside the loop. The
1057 	 * kmemleak objects cannot be freed from outside the loop because their
1058 	 * use_count was increased.
1059 	 */
1060 repeat:
1061 	object = list_entry(gray_list.next, typeof(*object), gray_list);
1062 	while (&object->gray_list != &gray_list) {
1063 		cond_resched();
1064 
1065 		/* may add new objects to the list */
1066 		if (!scan_should_stop())
1067 			scan_object(object);
1068 
1069 		tmp = list_entry(object->gray_list.next, typeof(*object),
1070 				 gray_list);
1071 
1072 		/* remove the object from the list and release it */
1073 		list_del(&object->gray_list);
1074 		put_object(object);
1075 
1076 		object = tmp;
1077 	}
1078 
1079 	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
1080 		goto scan_end;
1081 
1082 	/*
1083 	 * Check for new objects allocated during this scanning and add them
1084 	 * to the gray list.
1085 	 */
1086 	rcu_read_lock();
1087 	list_for_each_entry_rcu(object, &object_list, object_list) {
1088 		spin_lock_irqsave(&object->lock, flags);
1089 		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
1090 		    get_object(object)) {
1091 			object->flags &= ~OBJECT_NEW;
1092 			list_add_tail(&object->gray_list, &gray_list);
1093 		}
1094 		spin_unlock_irqrestore(&object->lock, flags);
1095 	}
1096 	rcu_read_unlock();
1097 
1098 	if (!list_empty(&gray_list))
1099 		goto repeat;
1100 
1101 scan_end:
1102 	WARN_ON(!list_empty(&gray_list));
1103 
1104 	/*
1105 	 * If scanning was stopped or new objects were being allocated at a
1106 	 * higher rate than gray list scanning, do not report any new
1107 	 * unreferenced objects.
1108 	 */
1109 	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
1110 		return;
1111 
1112 	/*
1113 	 * Scanning result reporting.
1114 	 */
1115 	rcu_read_lock();
1116 	list_for_each_entry_rcu(object, &object_list, object_list) {
1117 		spin_lock_irqsave(&object->lock, flags);
1118 		if (unreferenced_object(object) &&
1119 		    !(object->flags & OBJECT_REPORTED)) {
1120 			object->flags |= OBJECT_REPORTED;
1121 			new_leaks++;
1122 		}
1123 		spin_unlock_irqrestore(&object->lock, flags);
1124 	}
1125 	rcu_read_unlock();
1126 
1127 	if (new_leaks)
1128 		pr_info("%d new suspected memory leaks (see "
1129 			"/sys/kernel/debug/kmemleak)\n", new_leaks);
1130 
1131 }
1132 
1133 /*
1134  * Thread function performing automatic memory scanning. Unreferenced objects
1135  * remaining at the end of a memory scan are reported, but only once.
1136  */
1137 static int kmemleak_scan_thread(void *arg)
1138 {
1139 	static int first_run = 1;
1140 
1141 	pr_info("Automatic memory scanning thread started\n");
1142 	set_user_nice(current, 10);
1143 
1144 	/*
1145 	 * Wait before the first scan to allow the system to fully initialize.
1146 	 */
1147 	if (first_run) {
1148 		first_run = 0;
1149 		ssleep(SECS_FIRST_SCAN);
1150 	}
1151 
1152 	while (!kthread_should_stop()) {
1153 		signed long timeout = jiffies_scan_wait;
1154 
1155 		mutex_lock(&scan_mutex);
1156 		kmemleak_scan();
1157 		mutex_unlock(&scan_mutex);
1158 
1159 		/* wait before the next scan */
1160 		while (timeout && !kthread_should_stop())
1161 			timeout = schedule_timeout_interruptible(timeout);
1162 	}
1163 
1164 	pr_info("Automatic memory scanning thread ended\n");
1165 
1166 	return 0;
1167 }
1168 
1169 /*
1170  * Start the automatic memory scanning thread. This function must be called
1171  * with the scan_mutex held.
1172  */
1173 void start_scan_thread(void)
1174 {
1175 	if (scan_thread)
1176 		return;
1177 	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1178 	if (IS_ERR(scan_thread)) {
1179 		pr_warning("Failed to create the scan thread\n");
1180 		scan_thread = NULL;
1181 	}
1182 }
1183 
1184 /*
1185  * Stop the automatic memory scanning thread. This function must be called
1186  * with the scan_mutex held.
1187  */
1188 void stop_scan_thread(void)
1189 {
1190 	if (scan_thread) {
1191 		kthread_stop(scan_thread);
1192 		scan_thread = NULL;
1193 	}
1194 }
1195 
1196 /*
1197  * Iterate over the object_list and return the first valid object at or after
1198  * the required position with its use_count incremented. Note that reading the
1199  * file does not trigger a memory scan; use the "scan" command for that.
1200  */
1201 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1202 {
1203 	struct kmemleak_object *object;
1204 	loff_t n = *pos;
1205 	int err;
1206 
1207 	err = mutex_lock_interruptible(&scan_mutex);
1208 	if (err < 0)
1209 		return ERR_PTR(err);
1210 
1211 	rcu_read_lock();
1212 	list_for_each_entry_rcu(object, &object_list, object_list) {
1213 		if (n-- > 0)
1214 			continue;
1215 		if (get_object(object))
1216 			goto out;
1217 	}
1218 	object = NULL;
1219 out:
1220 	rcu_read_unlock();
1221 	return object;
1222 }
1223 
1224 /*
1225  * Return the next object in the object_list. The function decrements the
1226  * use_count of the previous object and increases that of the next one.
1227  */
1228 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1229 {
1230 	struct kmemleak_object *prev_obj = v;
1231 	struct kmemleak_object *next_obj = NULL;
1232 	struct list_head *n = &prev_obj->object_list;
1233 
1234 	++(*pos);
1235 
1236 	rcu_read_lock();
1237 	list_for_each_continue_rcu(n, &object_list) {
1238 		next_obj = list_entry(n, struct kmemleak_object, object_list);
1239 		if (get_object(next_obj))
1240 			break;
1241 	}
1242 	rcu_read_unlock();
1243 
1244 	put_object(prev_obj);
1245 	return next_obj;
1246 }
1247 
1248 /*
1249  * Decrement the use_count of the last object returned, if any.
1250  */
1251 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1252 {
1253 	if (!IS_ERR(v)) {
1254 		/*
1255 		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1256 		 * waiting was interrupted, so only release it if !IS_ERR.
1257 		 */
1258 		mutex_unlock(&scan_mutex);
1259 		if (v)
1260 			put_object(v);
1261 	}
1262 }
1263 
1264 /*
1265  * Print the information for an unreferenced object to the seq file.
1266  */
1267 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1268 {
1269 	struct kmemleak_object *object = v;
1270 	unsigned long flags;
1271 
1272 	spin_lock_irqsave(&object->lock, flags);
1273 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1274 		print_unreferenced(seq, object);
1275 	spin_unlock_irqrestore(&object->lock, flags);
1276 	return 0;
1277 }
1278 
1279 static const struct seq_operations kmemleak_seq_ops = {
1280 	.start = kmemleak_seq_start,
1281 	.next  = kmemleak_seq_next,
1282 	.stop  = kmemleak_seq_stop,
1283 	.show  = kmemleak_seq_show,
1284 };
1285 
1286 static int kmemleak_open(struct inode *inode, struct file *file)
1287 {
1288 	if (!atomic_read(&kmemleak_enabled))
1289 		return -EBUSY;
1290 
1291 	return seq_open(file, &kmemleak_seq_ops);
1292 }
1293 
1294 static int kmemleak_release(struct inode *inode, struct file *file)
1295 {
1296 	return seq_release(inode, file);
1297 }
1298 
1299 /*
1300  * File write operation to configure kmemleak at run-time. The following
1301  * commands can be written to the /sys/kernel/debug/kmemleak file:
1302  *   off	- disable kmemleak (irreversible)
1303  *   stack=on	- enable the task stacks scanning
1304  *   stack=off	- disable the task stacks scanning
1305  *   scan=on	- start the automatic memory scanning thread
1306  *   scan=off	- stop the automatic memory scanning thread
1307  *   scan=...	- set the automatic memory scanning period in seconds (0 to
1308  *		  disable it)
1309  *   scan	- trigger a memory scan
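 *
 * For example, from user space:
 *
 *	echo scan=on > /sys/kernel/debug/kmemleak	(start the scan thread)
 *	echo scan=600 > /sys/kernel/debug/kmemleak	(scan every 600 seconds)
 *	echo scan > /sys/kernel/debug/kmemleak		(trigger a scan now)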
1310  */
1311 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1312 			      size_t size, loff_t *ppos)
1313 {
1314 	char buf[64];
1315 	int buf_size;
1316 	int ret;
1317 
1318 	buf_size = min(size, (sizeof(buf) - 1));
1319 	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1320 		return -EFAULT;
1321 	buf[buf_size] = 0;
1322 
1323 	ret = mutex_lock_interruptible(&scan_mutex);
1324 	if (ret < 0)
1325 		return ret;
1326 
1327 	if (strncmp(buf, "off", 3) == 0)
1328 		kmemleak_disable();
1329 	else if (strncmp(buf, "stack=on", 8) == 0)
1330 		kmemleak_stack_scan = 1;
1331 	else if (strncmp(buf, "stack=off", 9) == 0)
1332 		kmemleak_stack_scan = 0;
1333 	else if (strncmp(buf, "scan=on", 7) == 0)
1334 		start_scan_thread();
1335 	else if (strncmp(buf, "scan=off", 8) == 0)
1336 		stop_scan_thread();
1337 	else if (strncmp(buf, "scan=", 5) == 0) {
1338 		unsigned long secs;
1339 
1340 		ret = strict_strtoul(buf + 5, 0, &secs);
1341 		if (ret < 0)
1342 			goto out;
1343 		stop_scan_thread();
1344 		if (secs) {
1345 			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1346 			start_scan_thread();
1347 		}
1348 	} else if (strncmp(buf, "scan", 4) == 0)
1349 		kmemleak_scan();
1350 	else
1351 		ret = -EINVAL;
1352 
1353 out:
1354 	mutex_unlock(&scan_mutex);
1355 	if (ret < 0)
1356 		return ret;
1357 
1358 	/* ignore the rest of the buffer, only one command at a time */
1359 	*ppos += size;
1360 	return size;
1361 }
1362 
1363 static const struct file_operations kmemleak_fops = {
1364 	.owner		= THIS_MODULE,
1365 	.open		= kmemleak_open,
1366 	.read		= seq_read,
1367 	.write		= kmemleak_write,
1368 	.llseek		= seq_lseek,
1369 	.release	= kmemleak_release,
1370 };
1371 
1372 /*
1373  * Perform the freeing of the kmemleak internal objects after waiting for any
1374  * current memory scan to complete.
1375  */
1376 static int kmemleak_cleanup_thread(void *arg)
1377 {
1378 	struct kmemleak_object *object;
1379 
1380 	mutex_lock(&scan_mutex);
1381 	stop_scan_thread();
1382 
1383 	rcu_read_lock();
1384 	list_for_each_entry_rcu(object, &object_list, object_list)
1385 		delete_object_full(object->pointer);
1386 	rcu_read_unlock();
1387 	mutex_unlock(&scan_mutex);
1388 
1389 	return 0;
1390 }
1391 
1392 /*
1393  * Start the clean-up thread.
1394  */
1395 static void kmemleak_cleanup(void)
1396 {
1397 	struct task_struct *cleanup_thread;
1398 
1399 	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
1400 				     "kmemleak-clean");
1401 	if (IS_ERR(cleanup_thread))
1402 		pr_warning("Failed to create the clean-up thread\n");
1403 }
1404 
1405 /*
1406  * Disable kmemleak. No memory allocation/freeing will be traced once this
1407  * function is called. Disabling kmemleak is an irreversible operation.
1408  */
1409 static void kmemleak_disable(void)
1410 {
1411 	/* atomically check whether it was already invoked */
1412 	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
1413 		return;
1414 
1415 	/* stop any memory operation tracing */
1416 	atomic_set(&kmemleak_early_log, 0);
1417 	atomic_set(&kmemleak_enabled, 0);
1418 
1419 	/* check whether it is too early for a kernel thread */
1420 	if (atomic_read(&kmemleak_initialized))
1421 		kmemleak_cleanup();
1422 
1423 	pr_info("Kernel memory leak detector disabled\n");
1424 }
1425 
1426 /*
1427  * Allow boot-time kmemleak disabling (enabled by default).
1428  */
1429 static int kmemleak_boot_config(char *str)
1430 {
1431 	if (!str)
1432 		return -EINVAL;
1433 	if (strcmp(str, "off") == 0)
1434 		kmemleak_disable();
1435 	else if (strcmp(str, "on") != 0)
1436 		return -EINVAL;
1437 	return 0;
1438 }
1439 early_param("kmemleak", kmemleak_boot_config);
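/*
 * For example, booting with "kmemleak=off" on the kernel command line turns
 * the leak detector off before any tracing starts; since disabling is
 * irreversible, it cannot be re-enabled at run-time.
 */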
1440 
1441 /*
1442  * Kmemleak initialization.
1443  */
1444 void __init kmemleak_init(void)
1445 {
1446 	int i;
1447 	unsigned long flags;
1448 
1449 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1450 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1451 
1452 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1453 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1454 	INIT_PRIO_TREE_ROOT(&object_tree_root);
1455 
1456 	/* the kernel is still in UP mode, so disabling the IRQs is enough */
1457 	local_irq_save(flags);
1458 	if (!atomic_read(&kmemleak_error)) {
1459 		atomic_set(&kmemleak_enabled, 1);
1460 		atomic_set(&kmemleak_early_log, 0);
1461 	}
1462 	local_irq_restore(flags);
1463 
1464 	/*
1465 	 * This is the point where tracking allocations is safe. Automatic
1466 	 * scanning is started during the late initcall. Add the early logged
1467 	 * callbacks to the kmemleak infrastructure.
1468 	 */
1469 	for (i = 0; i < crt_early_log; i++) {
1470 		struct early_log *log = &early_log[i];
1471 
1472 		switch (log->op_type) {
1473 		case KMEMLEAK_ALLOC:
1474 			kmemleak_alloc(log->ptr, log->size, log->min_count,
1475 				       GFP_KERNEL);
1476 			break;
1477 		case KMEMLEAK_FREE:
1478 			kmemleak_free(log->ptr);
1479 			break;
1480 		case KMEMLEAK_FREE_PART:
1481 			kmemleak_free_part(log->ptr, log->size);
1482 			break;
1483 		case KMEMLEAK_NOT_LEAK:
1484 			kmemleak_not_leak(log->ptr);
1485 			break;
1486 		case KMEMLEAK_IGNORE:
1487 			kmemleak_ignore(log->ptr);
1488 			break;
1489 		case KMEMLEAK_SCAN_AREA:
1490 			kmemleak_scan_area(log->ptr, log->offset, log->length,
1491 					   GFP_KERNEL);
1492 			break;
1493 		case KMEMLEAK_NO_SCAN:
1494 			kmemleak_no_scan(log->ptr);
1495 			break;
1496 		default:
1497 			WARN_ON(1);
1498 		}
1499 	}
1500 }
1501 
1502 /*
1503  * Late initialization function.
1504  */
1505 static int __init kmemleak_late_init(void)
1506 {
1507 	struct dentry *dentry;
1508 
1509 	atomic_set(&kmemleak_initialized, 1);
1510 
1511 	if (atomic_read(&kmemleak_error)) {
1512 		/*
1513 		 * Some error occurred and kmemleak was disabled. There is a
1514 		 * small chance that kmemleak_disable() was called immediately
1515 		 * after setting kmemleak_initialized and we may end up with
1516 		 * two clean-up threads but serialized by scan_mutex.
1517 		 */
1518 		kmemleak_cleanup();
1519 		return -ENOMEM;
1520 	}
1521 
1522 	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1523 				     &kmemleak_fops);
1524 	if (!dentry)
1525 		pr_warning("Failed to create the debugfs kmemleak file\n");
1526 	mutex_lock(&scan_mutex);
1527 	start_scan_thread();
1528 	mutex_unlock(&scan_mutex);
1529 
1530 	pr_info("Kernel memory leak detector initialized\n");
1531 
1532 	return 0;
1533 }
1534 late_initcall(kmemleak_late_init);
1535