// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
 *   del_state modifications and accesses to the object trees
 *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
 *   object_list is the main list holding the metadata (struct
 *   kmemleak_object) for the allocated memory blocks. The object trees are
 *   red black trees used to look-up metadata based on a pointer to the
 *   corresponding memory block. The kmemleak_object structures are added to
 *   the object_list and the object tree root in the create_object() function
 *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
 *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned int del_state;		/* deletion state */
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	depot_stack_handle_t trace_handle;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS		(1 << 4)
/* flag set for per-CPU pointers */
#define OBJECT_PERCPU		(1 << 5)

/* set when __remove_object() called */
#define DELSTATE_REMOVED	(1 << 0)
/* set to temporarily prevent deletion from object_list */
#define DELSTATE_NO_DELETE	(1 << 1)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PERCPU flag) boundaries */
static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protecting the access to object_list and the object trees defined above */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* minimum and maximum address that may be valid per-CPU pointers */
static unsigned long min_percpu_addr = ULONG_MAX;
static unsigned long max_percpu_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

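/*
 * Print to the seq file when one is available (e.g. when the "kmemleak"
 * debugfs file is read) or fall back to the kernel log otherwise.
 */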
#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Print the object's hex dump to the seq file. The number of lines printed
 * is limited to HEX_MAX_LINES to prevent seq file spamming. The actual
 * number of printed bytes depends on HEX_ROW_SIZE. It must be called with
 * the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	if (object->flags & OBJECT_PERCPU)
		ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	if (object->flags & OBJECT_PERCPU)
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes on cpu %d):\n",
				   len, raw_smp_processor_id());
	else
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Print the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
			   object->comm, object->pid, object->jiffies);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
		warn_or_seq_printf(seq, "    [<%pK>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	if (object->trace_handle)
		stack_depot_print(object->trace_handle);
}

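/*
 * Select the object tree corresponding to the given flags: virtual, physical
 * and per-CPU objects are kept in separate trees.
 */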
static struct rb_root *object_tree(unsigned long objflags)
{
	if (objflags & OBJECT_PHYS)
		return &object_phys_tree_root;
	if (objflags & OBJECT_PERCPU)
		return &object_percpu_tree_root;
	return &object_tree_root;
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       unsigned int objflags)
{
	struct rb_node *rb = object_tree(objflags)->rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/* Look up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, 0);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock() to avoid accessing a freed
 * structure.
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc_noprof(object_cache,
						 gfp_nested_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, 0);
}

/*
 * Remove an object from its object tree and object_list. Must be called with
 * the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object_tree(object->flags));
	if (!(object->del_state & DELSTATE_NO_DELETE))
		list_del_rcu(&object->object_list);
	object->del_state |= DELSTATE_REMOVED;
}

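/*
 * Look up an object and remove it from the object tree and object_list.
 * The kmemleak_lock must be held when calling this function.
 */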
static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
							int alias,
							unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __lookup_object(ptr, alias, objflags);
	if (object)
		__remove_object(object);

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both object
 * tree root and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

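/*
 * Save the current stack trace to the stack depot and return a handle to it,
 * or 0 if kmemleak_init() has not been called yet.
 */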
static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t trace_handle;
	unsigned long entries[MAX_TRACE];
	unsigned int nr_entries;

	/*
	 * Use object_cache to determine whether kmemleak_init() has
	 * been invoked. stack_depot_early_init() is called before
	 * kmemleak_init() in mm_core_init().
	 */
	if (!object_cache)
		return 0;
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return trace_handle;
}

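/*
 * Allocate a metadata object from the slab cache or the memory pool and
 * initialise the fields which do not depend on the tracked memory block
 * (use_count, colour, task information and stack trace).
 */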
static struct kmemleak_object *__alloc_object(gfp_t gfp)
{
	struct kmemleak_object *object;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->excess_ref = 0;
	object->count = 0;			/* white color initially */
	object->checksum = 0;
	object->del_state = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strscpy(object->comm, "hardirq");
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strscpy(object->comm, "softirq");
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strscpy(object->comm, current->comm);
	}

	/* kernel backtrace */
	object->trace_handle = set_track_prepare();

	return object;
}

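/*
 * Initialise the fields describing the tracked memory block and insert the
 * object into the object tree and object_list. Returns -EEXIST if the new
 * block overlaps an existing object. The kmemleak_lock must be held when
 * calling this function.
 */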
static int __link_object(struct kmemleak_object *object, unsigned long ptr,
			 size_t size, int min_count, unsigned int objflags)
{
	struct kmemleak_object *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object->flags = OBJECT_ALLOCATED | objflags;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->min_count = min_count;
	object->jiffies = jiffies;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr for objects storing virtual
	 * addresses. Update min_percpu_addr and max_percpu_addr for per-CPU
	 * objects.
	 */
	if (objflags & OBJECT_PERCPU) {
		min_percpu_addr = min(min_percpu_addr, untagged_ptr);
		max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
	} else if (!(objflags & OBJECT_PHYS)) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = &object_tree(objflags)->rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			return -EEXIST;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, object_tree(objflags));
	list_add_tail_rcu(&object->object_list, &object_list);

	return 0;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object tree.
 */
static void __create_object(unsigned long ptr, size_t size,
			    int min_count, gfp_t gfp, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int ret;

	object = __alloc_object(gfp);
	if (!object)
		return;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	ret = __link_object(object, ptr, size, min_count, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (ret)
		mem_pool_free(object);
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, 0);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
}

/* Create a kmemleak object corresponding to a per-CPU allocation. */
static void create_object_percpu(unsigned long ptr, size_t size,
				 int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, objflags);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size,
			       unsigned int objflags)
{
	struct kmemleak_object *object, *object_l, *object_r;
	unsigned long start, end, flags;

	object_l = __alloc_object(GFP_KERNEL);
	if (!object_l)
		return;

	object_r = __alloc_object(GFP_KERNEL);
	if (!object_r)
		goto out;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, 1, objflags);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		goto unlock;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if ((ptr > start) &&
	    !__link_object(object_l, start, ptr - start,
			   object->min_count, objflags))
		object_l = NULL;
	if ((ptr + size < end) &&
	    !__link_object(object_r, ptr + size, end - ptr - size,
			   object->min_count, objflags))
		object_r = NULL;

unlock:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (object)
		__delete_object(object);

out:
	if (object_l)
		mem_pool_free(object_l);
	if (object_r)
		mem_pool_free(object_r);
}

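/*
 * Change an object's colour by updating its min_count. Painting an object
 * black also sets OBJECT_NO_SCAN since black objects are never scanned.
 * The caller must hold object->lock.
 */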
static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, objflags);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, 0);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, unsigned int objflags)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc_noprof(scan_area_cache,
					       gfp_nested_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);

	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
		create_object_percpu((__force unsigned long)ptr, size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
		delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	depot_stack_handle_t trace_handle;
	unsigned long flags;

	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	trace_handle = set_track_prepare();
	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_handle = trace_handle;
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create object with OBJECT_PHYS flag and
		 * assume min_count 0.
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		object->checksum = 0;
		for_each_possible_cpu(cpu) {
			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);

			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
		}
	} else {
		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	}
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

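/*
 * Check whether a value found during scanning points into a tracked object
 * and, if so, update that object's reference count. Surplus references to an
 * already gray object are passed on to its excess_ref target (see
 * object_set_excess_ref()). The kmemleak_lock must be held when calling this
 * function.
 */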
static void pointer_update_refs(struct kmemleak_object *scanned,
				unsigned long pointer, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long untagged_ptr;
	unsigned long excess_ref;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
	if (objflags & OBJECT_PERCPU) {
		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
			return;
	} else {
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			return;
	}

	/*
	 * No need for get_object() here since we hold kmemleak_lock.
	 * object->use_count cannot be dropped to 0 while the object
	 * is still present in object_tree_root and object_list
	 * (with updates protected by kmemleak_lock).
	 */
	object = __lookup_object(pointer, 1, objflags);
	if (!object)
		return;
	if (object == scanned)
		/* self referenced, ignore */
		return;

	/*
	 * Avoid the lockdep recursive warning on object->lock being
	 * previously acquired in scan_object(). These locks are
	 * enclosed by scan_mutex.
	 */
	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
	/* only pass surplus references (object already gray) */
	if (color_gray(object)) {
		excess_ref = object->excess_ref;
		/* no need for update_refs() if object already gray */
	} else {
		excess_ref = 0;
		update_refs(object);
	}
	raw_spin_unlock(&object->lock);

	if (excess_ref) {
		object = lookup_object(excess_ref, 0);
		if (!object)
			return;
		if (object == scanned)
			/* circular reference, ignore */
			return;
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		update_refs(object);
		raw_spin_unlock(&object->lock);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		unsigned long pointer;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		pointer_update_refs(scanned, pointer, 0);
		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;

	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
			void *end = start + object->size;

			scan_block(start, end, object);

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
			if (!(object->flags & OBJECT_ALLOCATED))
				break;
		}
	} else if (hlist_empty(&object->area_list) ||
		   object->flags & OBJECT_FULL_SCAN) {
		void *start = object->flags & OBJECT_PHYS ?
				__va((phys_addr_t)object->pointer) :
				(void *)object->pointer;
		void *end = start + object->size;
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else {
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
	}
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Conditionally call cond_resched() in an object iteration loop while making
 * sure that the given object won't go away without RCU read lock by
 * performing a get_object() if necessary.
1592 */
kmemleak_cond_resched(struct kmemleak_object * object)1593 static void kmemleak_cond_resched(struct kmemleak_object *object)
1594 {
1595 if (!get_object(object))
1596 return; /* Try next object */
1597
1598 raw_spin_lock_irq(&kmemleak_lock);
1599 if (object->del_state & DELSTATE_REMOVED)
1600 goto unlock_put; /* Object removed */
1601 object->del_state |= DELSTATE_NO_DELETE;
1602 raw_spin_unlock_irq(&kmemleak_lock);
1603
1604 rcu_read_unlock();
1605 cond_resched();
1606 rcu_read_lock();
1607
1608 raw_spin_lock_irq(&kmemleak_lock);
1609 if (object->del_state & DELSTATE_REMOVED)
1610 list_del_rcu(&object->object_list);
1611 object->del_state &= ~DELSTATE_NO_DELETE;
1612 unlock_put:
1613 raw_spin_unlock_irq(&kmemleak_lock);
1614 put_object(object);
1615 }
1616
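/*
 * The intended calling pattern, as used by kmemleak_scan() below: iterate
 * the RCU-protected object_list and yield the CPU only through this helper
 * so that the current object survives the rcu_read_unlock()/rcu_read_lock()
 * window:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(object, &object_list, object_list) {
 *		if (need_resched())
 *			kmemleak_cond_resched(object);
 *		...
 *	}
 *	rcu_read_unlock();
 */
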
/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif

		/* ignore objects outside lowmem (paint them black) */
		if ((object->flags & OBJECT_PHYS) &&
		    !(object->flags & OBJECT_NO_SCAN)) {
			unsigned long phys = object->pointer;

			if (PHYS_PFN(phys) < min_low_pfn ||
			    PHYS_PFN(phys + object->size) >= max_low_pfn)
				__paint_it(object, KMEMLEAK_BLACK);
		}

		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irq(&object->lock);

		if (need_resched())
			kmemleak_cond_resched(object);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!(pfn & 63))
				cond_resched();

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

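/*
 * To summarise, a single kmemleak_scan() pass works as follows:
 *
 *  1. Whiten every object (count = 0); objects that still qualify as grey
 *     (e.g. painted grey at registration) seed the gray_list.
 *  2. Scan the root sources: the per-cpu sections, the struct pages of
 *     each populated zone and, optionally, the task stacks.
 *  3. scan_gray_list() scans each referenced block, transitively greying
 *     every object it points to.
 *  4. White objects whose checksum changed since the last scan are greyed
 *     and scanned once more, to reduce transient false positives.
 *  5. Objects that remain white (unreferenced) are reported, once.
 */
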
/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * found at the end of a memory scan are reported, but only the first time
 * they are seen.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);

		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

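/*
 * Illustrative report as read via "cat /sys/kernel/debug/kmemleak"; the
 * exact layout comes from print_unreferenced(), and the address, size and
 * backtrace entries below are made up:
 *
 *	unreferenced object 0xffff888106abcb40 (size 64):
 *	  comm "modprobe", pid 1813, jiffies 4294957296
 *	  backtrace:
 *	    kmem_cache_alloc+0x13c/0x2a0
 *	    my_leaky_init+0x2e/0x80	(hypothetical caller)
 */
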
static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * Objects are painted grey rather than black so that they are still scanned
 * in the future. A black object is never scanned, so any references it holds
 * to objects allocated later would be missed and those objects would show up
 * as false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

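/*
 * A typical workflow built on "clear" (see
 * Documentation/dev-tools/kmemleak.rst): grey out everything reported so
 * far to establish a baseline, exercise the workload under suspicion, then
 * scan again and read back only the new reports:
 *
 *	# echo clear > /sys/kernel/debug/kmemleak
 *	... run the workload ...
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 */
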
static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects
 *		  as grey to ignore printing them, or free all kmemleak
 *		  objects if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

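/*
 * For example, the commands above allow the automatic scanning to be tuned
 * entirely from user space:
 *
 *	# echo scan=600 > /sys/kernel/debug/kmemleak	(scan every 10 min)
 *	# echo scan=off > /sys/kernel/debug/kmemleak	(stop the scan thread)
 *	# echo stack=off > /sys/kernel/debug/kmemleak	(skip task stacks)
 */
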
static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or kmemleak_lock held.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were found (otherwise, kmemleak keeps its metadata so the
 * existing information on memory leaks remains accessible).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_late_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0) {
		kmemleak_skip_disable = 1;
		stack_depot_request_early_init();
	} else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

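/*
 * For example, booting with "kmemleak=off" on the kernel command line
 * disables kmemleak early, while "kmemleak=on" overrides
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF and requests early stack depot
 * initialization.
 */
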
/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_late_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_late_initialized and we may end up
		 * with two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);