// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
 *   del_state modifications and accesses to the object trees
 *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
 *   object_list is the main list holding the metadata (struct
 *   kmemleak_object) for the allocated memory blocks. The object trees are
 *   red black trees used to look-up metadata based on a pointer to the
 *   corresponding memory block. The kmemleak_object structures are added to
 *   the object_list and the object tree root in the create_object() function
 *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
 *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
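
/*
 * Illustrative sketch (editor's addition, not part of the kmemleak API):
 * the canonical pattern for taking and dropping a reference to a
 * kmemleak_object under the rules above. find_and_get_object() below
 * implements exactly this; the sketch only spells out the ordering of
 * rcu_read_lock(), the look-up under kmemleak_lock and the use_count
 * handling:
 *
 *	rcu_read_lock();
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 *	object = __lookup_object(ptr, alias, objflags);
 *	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	- use_count already 0
 *		object = NULL;
 *	rcu_read_unlock();
 *	if (object) {
 *		... use *object ...
 *		put_object(object);		- may schedule RCU freeing
 *	}
 */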

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned int del_state;		/* deletion state */
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	depot_stack_handle_t trace_handle;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS		(1 << 4)
/* flag set for per-CPU pointers */
#define OBJECT_PERCPU		(1 << 5)

/* set when __remove_object() called */
#define DELSTATE_REMOVED	(1 << 0)
/* set to temporarily prevent deletion from object_list */
#define DELSTATE_NO_DELETE	(1 << 1)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PERCPU flag) boundaries */
static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protecting the access to object_list and the object trees above */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled __read_mostly = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled __read_mostly = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* minimum and maximum address that may be valid per-CPU pointers */
static unsigned long min_percpu_addr = ULONG_MAX;
static unsigned long max_percpu_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warn(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurs and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	if (object->flags & OBJECT_PERCPU)
		ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	if (object->flags & OBJECT_PERCPU)
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes on cpu %d):\n",
				   len, raw_smp_processor_id());
	else
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
	       object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
	       object->count >= object->min_count;
}

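/*
 * Worked example (editor's addition): a kmalloc'ed block starts with
 * count == 0 and min_count == 1, i.e. white. Every pointer to it found
 * during a scan increments count; once count >= min_count the object turns
 * gray and is scanned itself. kmemleak_not_leak() paints an object gray by
 * setting min_count to KMEMLEAK_GREY (0) and kmemleak_ignore() paints it
 * black by setting min_count to KMEMLEAK_BLACK (-1):
 *
 *	count = 0, min_count = 1	-> white (potential leak)
 *	count = 2, min_count = 1	-> gray  (referenced, scanned)
 *	count = any, min_count = 0	-> gray  (false positive, scanned)
 *	count = any, min_count = -1	-> black (not scanned, not reported)
 */
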
/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

static const char *__object_type_str(struct kmemleak_object *object)
{
	if (object->flags & OBJECT_PHYS)
		return " (phys)";
	if (object->flags & OBJECT_PERCPU)
		return " (percpu)";
	return "";
}

/*
 * Print the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
	warn_or_seq_printf(seq, "unreferenced object%s 0x%08lx (size %zu):\n",
			   __object_type_str(object),
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
			   object->comm, object->pid, object->jiffies);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
		warn_or_seq_printf(seq, "    %pS\n", ptr);
	}
}

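/*
 * For reference (editor's addition), the format strings above produce a
 * report of the following shape; all values below are illustrative only:
 *
 *	unreferenced object 0xffff888106f4b800 (size 64):
 *	  comm "insmod", pid 1234, jiffies 4294901234
 *	  hex dump (first 32 bytes):
 *	    6b 6d 65 6d 6c 65 61 6b 00 00 00 00 00 00 00 00  kmemleak........
 *	    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
 *	  backtrace (crc 1a2b3c4d):
 *	    kmem_cache_alloc_noprof+0x13a/0x2e0
 *	    my_driver_probe+0x4f/0x120	(hypothetical caller)
 */
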
/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object%s 0x%08lx (size %zu):\n",
		  __object_type_str(object), object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	if (object->trace_handle)
		stack_depot_print(object->trace_handle);
}

static struct rb_root *object_tree(unsigned long objflags)
{
	if (objflags & OBJECT_PHYS)
		return &object_phys_tree_root;
	if (objflags & OBJECT_PERCPU)
		return &object_percpu_tree_root;
	return &object_tree_root;
}

/*
 * Look up the metadata (kmemleak_object) of a memory block in the object
 * search tree based on a pointer value. If alias is 0, only values pointing
 * to the beginning of the memory block are allowed. The kmemleak_lock must
 * be held when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       unsigned int objflags)
{
	struct rb_node *rb = object_tree(objflags)->rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/* Look up a kmemleak object allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, 0);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc_noprof(object_cache,
						 gfp_nested_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, 0);
}

/*
 * Remove an object from its object tree and object_list. Must be called with
 * the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object_tree(object->flags));
	if (!(object->del_state & DELSTATE_NO_DELETE))
		list_del_rcu(&object->object_list);
	object->del_state |= DELSTATE_REMOVED;
}

static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
							int alias,
							unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __lookup_object(ptr, alias, objflags);
	if (object)
		__remove_object(object);

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both object
 * tree root and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t trace_handle;
	unsigned long entries[MAX_TRACE];
	unsigned int nr_entries;

	/*
	 * Use object_cache to determine whether kmemleak_init() has
	 * been invoked. stack_depot_early_init() is called before
	 * kmemleak_init() in mm_core_init().
	 */
	if (!object_cache)
		return 0;
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return trace_handle;
}

static struct kmemleak_object *__alloc_object(gfp_t gfp)
{
	struct kmemleak_object *object;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->excess_ref = 0;
	object->count = 0;			/* white color initially */
	object->checksum = 0;
	object->del_state = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strscpy(object->comm, "hardirq");
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strscpy(object->comm, "softirq");
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strscpy(object->comm, current->comm);
	}

	/* kernel backtrace */
	object->trace_handle = set_track_prepare();

	return object;
}

static int __link_object(struct kmemleak_object *object, unsigned long ptr,
			 size_t size, int min_count, unsigned int objflags)
{
	struct kmemleak_object *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object->flags = OBJECT_ALLOCATED | objflags;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->min_count = min_count;
	object->jiffies = jiffies;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr for objects storing a virtual
	 * address, and update min_percpu_addr/max_percpu_addr for per-CPU
	 * objects.
	 */
	if (objflags & OBJECT_PERCPU) {
		min_percpu_addr = min(min_percpu_addr, untagged_ptr);
		max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
	} else if (!(objflags & OBJECT_PHYS)) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = &object_tree(objflags)->rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			return -EEXIST;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, object_tree(objflags));
	list_add_tail_rcu(&object->object_list, &object_list);

	return 0;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object tree.
 */
static void __create_object(unsigned long ptr, size_t size,
			    int min_count, gfp_t gfp, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int ret;

	object = __alloc_object(gfp);
	if (!object)
		return;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	ret = __link_object(object, ptr, size, min_count, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (ret)
		mem_pool_free(object);
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, 0);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
}

/* Create a kmemleak object corresponding to a per-CPU allocation. */
static void create_object_percpu(unsigned long ptr, size_t size,
				 int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, objflags);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size,
			       unsigned int objflags)
{
	struct kmemleak_object *object, *object_l, *object_r;
	unsigned long start, end, flags;

	object_l = __alloc_object(GFP_KERNEL);
	if (!object_l)
		return;

	object_r = __alloc_object(GFP_KERNEL);
	if (!object_r)
		goto out;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, 1, objflags);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		goto unlock;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if ((ptr > start) &&
	    !__link_object(object_l, start, ptr - start,
			   object->min_count, objflags))
		object_l = NULL;
	if ((ptr + size < end) &&
	    !__link_object(object_r, ptr + size, end - ptr - size,
			   object->min_count, objflags))
		object_r = NULL;

unlock:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (object)
		__delete_object(object);

out:
	if (object_l)
		mem_pool_free(object_l);
	if (object_r)
		mem_pool_free(object_r);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, objflags);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, 0);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, unsigned int objflags)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}

/*
 * Reset the checksum of an object. The immediate effect is that it will not
 * be reported as a leak during the next scan until its checksum is updated.
 */
static void reset_checksum(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not resetting the checksum of an unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->checksum = 0;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc_noprof(scan_area_cache,
					       gfp_nested_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
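
/*
 * Usage sketch (editor's addition): a custom allocator handing out blocks
 * that the slab allocator does not know about would pair the callbacks as
 * below. my_alloc(), my_free(), get_block() and put_block() are
 * hypothetical names:
 *
 *	void *my_alloc(size_t size)
 *	{
 *		void *ptr = get_block(size);
 *
 *		if (ptr)
 *			kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *		return ptr;
 *	}
 *
 *	void my_free(void *ptr)
 *	{
 *		kmemleak_free(ptr);
 *		put_block(ptr);
 *	}
 */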

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);

	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
		create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
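
/*
 * Illustration (editor's addition): min_count is 2 above because the
 * vm_struct itself stores the block address in vm_struct::addr, so one
 * reference exists for as long as the vm_struct is around and the caller's
 * own pointer is needed on top of that. The object_set_excess_ref() call
 * additionally makes any surplus reference to the (already gray) vm_struct
 * count as a reference to the vmalloc'ed block, covering users such as the
 * cached kernel thread stacks (see free_thread_stack()) that keep only a
 * vm_struct pointer:
 *
 *	p = vmalloc(SZ_4K);
 *	vm = find_vm_area(p);
 *	p = NULL;
 *	(a live pointer to vm still keeps the data block gray)
 */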

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
		delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	depot_stack_handle_t trace_handle;
	unsigned long flags;

	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	trace_handle = set_track_prepare();
	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_handle = trace_handle;
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
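
/*
 * Usage sketch (editor's addition): a block whose only surviving reference
 * lives somewhere kmemleak never scans, e.g. a physical address programmed
 * into a device register, would otherwise be reported as a leak, so its
 * owner marks it gray once at allocation time. dev_base and MY_DMA_ADDR
 * are hypothetical names:
 *
 *	buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	writel(virt_to_phys(buf), dev_base + MY_DMA_ADDR);
 *	kmemleak_not_leak(buf);
 */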

/**
 * kmemleak_transient_leak - mark an allocated object as transient false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to not be
 * reported as a leak temporarily. This may happen, for example, if the object
 * is part of a singly linked list and the ->next reference to it is changed.
 */
void __ref kmemleak_transient_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		reset_checksum((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_transient_leak);
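
/*
 * Why this works (editor's addition): during a scan, a white object whose
 * checksum changed since the previous scan is temporarily painted gray
 * instead of being reported (see update_checksum() and kmemleak_scan()), on
 * the assumption that an object still being modified is probably still in
 * use. Resetting the checksum to 0 therefore buys the object at least one
 * more scan cycle before it can be reported again.
 */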

/**
 * kmemleak_ignore_percpu - similar to kmemleak_ignore but taking a percpu
 *			    address argument
 * @ptr:	percpu address of the object
 */
void __ref kmemleak_ignore_percpu(const void __percpu *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
		make_black_object((unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_ignore_percpu);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
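
/*
 * Usage sketch (editor's addition): for a large buffer where only an
 * embedded header holds pointers, restricting the scan to the header keeps
 * kmemleak from treating random payload data as references. struct my_buf
 * and its hdr member are hypothetical:
 *
 *	struct my_buf *buf = kmalloc(SZ_64K, GFP_KERNEL);
 *
 *	kmemleak_scan_area(&buf->hdr, sizeof(buf->hdr), GFP_KERNEL);
 */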

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);
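
/*
 * Usage sketch (editor's addition): unlike kmemleak_ignore(), the object
 * stays tracked and may still be reported as a leak; only scanning of its
 * contents is skipped. A typical candidate is a buffer holding raw data,
 * with frame_size a hypothetical length:
 *
 *	u8 *frame = kmalloc(frame_size, GFP_KERNEL);
 *
 *	kmemleak_no_scan(frame);
 */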

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create object with OBJECT_PHYS flag and
		 * assume min_count 0.
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
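
/*
 * Note (editor's addition): the min_count of 0 makes physical-address
 * objects gray, i.e. they are scanned for references to other objects (via
 * their lowmem mapping, see scan_object()) but are never themselves
 * reported as leaks. Early memblock allocations are the typical source of
 * such registrations.
 */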

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		object->checksum = 0;
		for_each_possible_cpu(cpu) {
			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);

			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
		}
	} else {
		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	}
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

static void pointer_update_refs(struct kmemleak_object *scanned,
				unsigned long pointer, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long untagged_ptr;
	unsigned long excess_ref;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
	if (objflags & OBJECT_PERCPU) {
		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
			return;
	} else {
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			return;
	}

	/*
	 * No need for get_object() here since we hold kmemleak_lock.
	 * object->use_count cannot be dropped to 0 while the object
	 * is still present in object_tree_root and object_list
	 * (with updates protected by kmemleak_lock).
	 */
	object = __lookup_object(pointer, 1, objflags);
	if (!object)
		return;
	if (object == scanned)
		/* self referenced, ignore */
		return;

	/*
	 * Avoid the lockdep recursive warning on object->lock being
	 * previously acquired in scan_object(). These locks are
	 * enclosed by scan_mutex.
	 */
	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
	/* only pass surplus references (object already gray) */
	if (color_gray(object)) {
		excess_ref = object->excess_ref;
		/* no need for update_refs() if object already gray */
	} else {
		excess_ref = 0;
		update_refs(object);
	}
	raw_spin_unlock(&object->lock);

	if (excess_ref) {
		object = lookup_object(excess_ref, 0);
		if (!object)
			return;
		if (object == scanned)
			/* circular reference, ignore */
			return;
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		update_refs(object);
		raw_spin_unlock(&object->lock);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		unsigned long pointer;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		pointer_update_refs(scanned, pointer, 0);
		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;

	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
			void *end = start + object->size;

			scan_block(start, end, object);

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
			if (!(object->flags & OBJECT_ALLOCATED))
				break;
		}
	} else if (hlist_empty(&object->area_list) ||
		   object->flags & OBJECT_FULL_SCAN) {
		void *start = object->flags & OBJECT_PHYS ?
				__va((phys_addr_t)object->pointer) :
				(void *)object->pointer;
		void *end = start + object->size;
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;
			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else {
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
	}
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Conditionally call cond_resched() in an object iteration loop while making
 * sure that the given object won't go away without the RCU read lock by
 * performing a get_object() if necessary.
 */
static void kmemleak_cond_resched(struct kmemleak_object *object)
{
	if (!get_object(object))
		return;	/* Try next object */

	raw_spin_lock_irq(&kmemleak_lock);
	if (object->del_state & DELSTATE_REMOVED)
		goto unlock_put;	/* Object removed */
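	/*
	 * While DELSTATE_NO_DELETE is set, a concurrent delete_object() only
	 * marks the object DELSTATE_REMOVED instead of unlinking it from
	 * object_list; the removal is then completed below once it is safe
	 * to do so.
	 */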
	object->del_state |= DELSTATE_NO_DELETE;
	raw_spin_unlock_irq(&kmemleak_lock);

	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();

	raw_spin_lock_irq(&kmemleak_lock);
	if (object->del_state & DELSTATE_REMOVED)
		list_del_rcu(&object->object_list);
	object->del_state &= ~DELSTATE_NO_DELETE;
unlock_put:
	raw_spin_unlock_irq(&kmemleak_lock);
	put_object(object);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif

		/* ignore objects outside lowmem (paint them black) */
		if ((object->flags & OBJECT_PHYS) &&
		    !(object->flags & OBJECT_NO_SCAN)) {
			unsigned long phys = object->pointer;

			if (PHYS_PFN(phys) < min_low_pfn ||
			    PHYS_PFN(phys + object->size) > max_low_pfn)
				__paint_it(object, KMEMLEAK_BLACK);
		}

		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irq(&object->lock);

		if (need_resched())
			kmemleak_cond_resched(object);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!(pfn & 63))
				cond_resched();

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
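			/* scan the struct page itself, not the memory it describes */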
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED) &&
		    update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * found at the end of a memory scan are reported, but only the first time
 * they are detected.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = secs_to_jiffies(SECS_FIRST_SCAN);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object returned, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

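/*
 * Reading the "kmemleak" debugfs file walks the object_list through the
 * start/next/stop callbacks below, printing only objects that a scan has
 * already reported as unreferenced.
 */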
static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static bool __dump_str_object_info(unsigned long addr, unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = __find_and_get_object(addr, 1, objflags);
	if (!object)
		return false;

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);

	return true;
}

static int dump_str_object_info(const char *str)
{
	unsigned long addr;
	bool found = false;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;

	found |= __dump_str_object_info(addr, 0);
	found |= __dump_str_object_info(addr, OBJECT_PHYS);
	found |= __dump_str_object_info(addr, OBJECT_PERCPU);

	if (!found) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not scan these objects again, they could contain
 * references to newly allocated objects and we would end up with false
 * positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects
 *		  as grey to ignore printing them, or free all kmemleak
 *		  objects if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given address
 *
 * See the example session below.
 */
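/*
 * Example session (illustrative only; assumes debugfs is mounted at
 * /sys/kernel/debug and kmemleak is enabled):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger a scan now)
 *	# cat /sys/kernel/debug/kmemleak		(list reported leaks)
 *	# echo scan=600 > /sys/kernel/debug/kmemleak	(scan every 10 minutes)
 *	# echo clear > /sys/kernel/debug/kmemleak	(grey out current reports)
 */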
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned int secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or kmemleak_lock held.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and, unless leaks have already been
 * reported, free the kmemleak internal objects (otherwise, kmemleak may
 * still hold useful information on the memory leaks found).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_late_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak enabling/disabling. kmemleak is enabled by
 * default unless CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is set, in which case
 * "kmemleak=on" must be passed on the kernel command line.
 */
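/*
 * Example (illustrative): booting with "kmemleak=off" turns the detector
 * off before it initializes; booting with "kmemleak=on" also requests the
 * early stack depot initialization used to record allocation backtraces.
 */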
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0) {
		kmemleak_skip_disable = 1;
		stack_depot_request_early_init();
	} else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = secs_to_jiffies(SECS_SCAN_WAIT);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_late_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_late_initialized and we may end up
		 * with two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);