// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/cpu.h>
#include <linux/debugobjects.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/static_key.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Must be power of two */
#define ODEBUG_BATCH_SIZE	16

/* Initial values. Must all be a multiple of batch size */
#define ODEBUG_POOL_SIZE	(64 * ODEBUG_BATCH_SIZE)
#define ODEBUG_POOL_MIN_LEVEL	(ODEBUG_POOL_SIZE / 4)

#define ODEBUG_POOL_PERCPU_SIZE	(8 * ODEBUG_BATCH_SIZE)

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
/*
 * The freeing of debug objects via the workqueue is rate limited to a
 * maximum frequency of 10Hz and to ODEBUG_FREE_WORK_MAX batches (1024
 * objects) per invocation, i.e. at most ~10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

struct pool_stats {
	unsigned int		cur_used;	/* Objects handed out to per CPU pools */
	unsigned int		max_used;	/* Watermark of cur_used */
	unsigned int		min_fill;	/* Lowest observed fill level of pool_global */
};
56
57 struct obj_pool {
58 struct hlist_head objects;
59 unsigned int cnt;
60 unsigned int min_cnt;
61 unsigned int max_cnt;
62 struct pool_stats stats;
63 } ____cacheline_aligned;
64
65
66 static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
67 .max_cnt = ODEBUG_POOL_PERCPU_SIZE,
68 };
69
70 static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
71
72 static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
73
74 static DEFINE_RAW_SPINLOCK(pool_lock);
75
76 static struct obj_pool pool_global = {
77 .min_cnt = ODEBUG_POOL_MIN_LEVEL,
78 .max_cnt = ODEBUG_POOL_SIZE,
79 .stats = {
80 .min_fill = ODEBUG_POOL_SIZE,
81 },
82 };
83
84 static struct obj_pool pool_to_free = {
85 .max_cnt = UINT_MAX,
86 };
87
88 static HLIST_HEAD(pool_boot);
89
90 static unsigned long avg_usage;
91 static bool obj_freeing;
92
93 static int __data_racy debug_objects_maxchain __read_mostly;
94 static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly;
95 static int __data_racy debug_objects_fixups __read_mostly;
96 static int __data_racy debug_objects_warnings __read_mostly;
97 static bool __data_racy debug_objects_enabled __read_mostly
98 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
99
100 static const struct debug_obj_descr *descr_test __read_mostly;
101 static struct kmem_cache *obj_cache __ro_after_init;
102
103 /*
104 * Track numbers of kmem_cache_alloc()/free() calls done.
105 */
106 static int __data_racy debug_objects_allocated;
107 static int __data_racy debug_objects_freed;
108
109 static void free_obj_work(struct work_struct *work);
110 static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
111
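/*
 * Set once obj_cache is populated in debug_objects_mem_init(). Until
 * then allocations and frees fall back to the static pool_boot list,
 * see alloc_object() and __free_object().
 */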
static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt;
}

static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt / 2;
}

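/*
 * A sketch of the batch layout which the pool functions below rely on,
 * as maintained by pcpu_free() and kmem_alloc_batch(): objects are kept
 * in batches of ODEBUG_BATCH_SIZE and the first object of each batch
 * carries a batch_last pointer to the last node of that batch:
 *
 *	objects.first -> obj0 -> obj1 -> ... -> objN -> next batch ...
 *	                  |                       ^
 *	                  +------ batch_last -----+
 *
 * That allows moving a complete batch between pools in O(1).
 */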
static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
	struct hlist_node *last, *next_batch, *first_batch;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt || !src->cnt)
		return false;

	first_batch = src->objects.first;
	obj = hlist_entry(first_batch, typeof(*obj), node);
	last = obj->batch_last;
	next_batch = last->next;

	/* Move the next batch to the front of the source pool */
	src->objects.first = next_batch;
	if (next_batch)
		next_batch->pprev = &src->objects.first;

	/* Add the extracted batch to the destination pool */
	last->next = dst->objects.first;
	if (last->next)
		last->next->pprev = &last->next;
	first_batch->pprev = &dst->objects.first;
	dst->objects.first = first_batch;

	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}

static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
{
	struct hlist_node *last;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt)
		return false;

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;

	hlist_splice_init(head, last, &dst->objects);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}

static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
{
	struct hlist_node *last, *next;
	struct debug_obj *obj;

	if (!src->cnt)
		return false;

	/* Move the complete list to the head */
	hlist_move_list(&src->objects, head);

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;
	next = last->next;
	/* Disconnect the batch from the list */
	last->next = NULL;

	/* Move the node after last back to the source pool. */
	src->objects.first = next;
	if (next)
		next->pprev = &src->objects.first;

	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	return true;
}

static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj;

	if (unlikely(!list->first))
		return NULL;

	obj = hlist_entry(list->first, typeof(*obj), node);
	hlist_del(&obj->node);
	return obj;
}

static void pcpu_refill_stats(void)
{
	struct pool_stats *stats = &pool_global.stats;

	WRITE_ONCE(stats->cur_used, stats->cur_used + ODEBUG_BATCH_SIZE);

	if (stats->cur_used > stats->max_used)
		stats->max_used = stats->cur_used;

	if (pool_global.cnt < stats->min_fill)
		stats->min_fill = pool_global.cnt;
}

static struct debug_obj *pcpu_alloc(void)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	for (;;) {
		struct debug_obj *obj = __alloc_object(&pcp->objects);

		if (likely(obj)) {
			pcp->cnt--;
			/*
			 * If this emptied a batch try to refill from the
			 * free pool. Don't do that if this was the top-most
			 * batch as pcpu_free() expects the per CPU pool
			 * to be less than ODEBUG_POOL_PERCPU_SIZE.
			 */
			if (unlikely(pcp->cnt < (ODEBUG_POOL_PERCPU_SIZE - ODEBUG_BATCH_SIZE) &&
				     !(pcp->cnt % ODEBUG_BATCH_SIZE))) {
				/*
				 * Don't try to allocate from the regular pool here
				 * to not exhaust it prematurely.
				 */
				if (pool_count(&pool_to_free)) {
					guard(raw_spinlock)(&pool_lock);
					pool_move_batch(pcp, &pool_to_free);
					pcpu_refill_stats();
				}
			}
			return obj;
		}

		guard(raw_spinlock)(&pool_lock);
		if (!pool_move_batch(pcp, &pool_to_free)) {
			if (!pool_move_batch(pcp, &pool_global))
				return NULL;
		}
		pcpu_refill_stats();
	}
}

static void pcpu_free(struct debug_obj *obj)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
	struct debug_obj *first;

	lockdep_assert_irqs_disabled();

	if (!(pcp->cnt % ODEBUG_BATCH_SIZE)) {
		obj->batch_last = &obj->node;
	} else {
		first = hlist_entry(pcp->objects.first, typeof(*first), node);
		obj->batch_last = first->batch_last;
	}
	hlist_add_head(&obj->node, &pcp->objects);
	pcp->cnt++;

	/* Pool full ? */
	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
		return;

	/* Remove a batch from the per CPU pool */
	guard(raw_spinlock)(&pool_lock);
	/* Try to fit the batch into the pool_global first */
	if (!pool_move_batch(&pool_global, pcp))
		pool_move_batch(&pool_to_free, pcp);
	WRITE_ONCE(pool_global.stats.cur_used, pool_global.stats.cur_used - ODEBUG_BATCH_SIZE);
}

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;

	/*
	 * Reuse objects from the global pool_to_free list; they will be
	 * reinitialized when allocating.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag.
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	/* Avoid taking the lock when there is no work to do */
	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
		guard(raw_spinlock)(&pool_lock);
		/* Move a batch if possible */
		pool_move_batch(&pool_global, &pool_to_free);
	}
	clear_bit(0, &state);
}

static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
{
	struct hlist_node *last = NULL;
	struct debug_obj *obj;

	for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
		obj = kmem_cache_zalloc(cache, gfp);
		if (!obj) {
			free_object_list(head);
			return false;
		}
		debug_objects_allocated++;

		if (!last)
			last = &obj->node;
		obj->batch_last = last;

		hlist_add_head(&obj->node, head);
	}
	return true;
}

static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *  - another CPU is already allocating, and
	 *  - the global pool has not yet reached the critical level
	 */
	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_should_refill(&pool_global)) {
		HLIST_HEAD(head);

		if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		if (!pool_push_batch(&pool_global, &head))
			pool_push_batch(&pool_to_free, &head);
	}
	atomic_dec(&cpus_allocating);
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

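/*
 * avg_usage is an exponentially weighted moving average of the number
 * of objects handed out to the per CPU pools, scaled by
 * ODEBUG_FREE_WORK_MAX and updated at most once per 10ms. It reuses the
 * scheduler's fixed point calc_load() helper and is consumed by
 * free_obj_work() to rate limit the freeing of objects.
 */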
static void calc_usage(void)
{
	static DEFINE_RAW_SPINLOCK(avg_lock);
	static unsigned long avg_period;
	unsigned long cur, now = jiffies;

	if (!time_after_eq(now, READ_ONCE(avg_period)))
		return;

	if (!raw_spin_trylock(&avg_lock))
		return;

	WRITE_ONCE(avg_period, now + msecs_to_jiffies(10));
	cur = READ_ONCE(pool_global.stats.cur_used) * ODEBUG_FREE_WORK_MAX;
	WRITE_ONCE(avg_usage, calc_load(avg_usage, EXP_5, cur));
	raw_spin_unlock(&avg_lock);
}

static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
				      const struct debug_obj_descr *descr)
{
	struct debug_obj *obj;

	calc_usage();

	if (static_branch_likely(&obj_cache_enabled))
		obj = pcpu_alloc();
	else
		obj = __alloc_object(&pool_boot);

	if (likely(obj)) {
		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/* workqueue function to free objects. */
static void free_obj_work(struct work_struct *work)
{
	static unsigned long last_use_avg;
	unsigned long cur_used, last_used, delta;
	unsigned int max_free = 0;

	WRITE_ONCE(obj_freeing, false);

	/* Rate limit freeing based on current use average */
	cur_used = READ_ONCE(avg_usage);
	last_used = last_use_avg;
	last_use_avg = cur_used;

	if (!pool_count(&pool_to_free))
		return;

	if (cur_used <= last_used) {
		delta = (last_used - cur_used) / ODEBUG_FREE_WORK_MAX;
		max_free = min(delta, ODEBUG_FREE_WORK_MAX);
	}

	for (int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
		HLIST_HEAD(tofree);

		/* Acquire and drop the lock for each batch */
		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
			if (!pool_to_free.cnt)
				return;

			/* Refill the global pool if possible */
			if (pool_move_batch(&pool_global, &pool_to_free)) {
				/* Don't free as there seems to be demand */
				max_free = 0;
			} else if (max_free) {
				pool_pop_batch(&tofree, &pool_to_free);
				max_free--;
			} else {
				return;
			}
		}
		free_object_list(&tofree);
	}
}

static void __free_object(struct debug_obj *obj)
{
	guard(irqsave)();
	if (static_branch_likely(&obj_cache_enabled))
		pcpu_free(obj);
	else
		hlist_add_head(&obj->node, &pool_boot);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

	put_objects(&pcp->objects);
	pcp->cnt = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
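
/*
 * Note: all addresses within one ODEBUG_CHUNK (one page) hash to the
 * same bucket. __debug_check_no_obj_freed() relies on this when it
 * scans a freed memory region chunk by chunk.
 */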

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	if (!static_branch_likely(&obj_cache_enabled))
		return;

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/* Try reusing objects from the pool_to_free list */
	fill_pool_from_freelist();

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
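
/*
 * Illustrative usage sketch (not part of this file's API surface): a
 * subsystem declares a struct debug_obj_descr for its object type and
 * calls the debug_object_*() hooks from its life time events. The names
 * my_obj, my_obj_descr and my_obj_is_static() below are hypothetical.
 *
 *	static const struct debug_obj_descr my_obj_descr = {
 *		.name			= "my_obj",
 *		.is_static_object	= my_obj_is_static,
 *	};
 *
 *	void my_obj_init(struct my_obj *obj)
 *	{
 *		debug_object_init(obj, &my_obj_descr);
 *	}
 */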

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);
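
/*
 * Illustrative life time sketch, continuing the hypothetical my_obj
 * example above: activate before the object is put to work, deactivate
 * once it is idle again, free when it goes away.
 *
 *	debug_object_activate(obj, &my_obj_descr);
 *	... object is in use ...
 *	debug_object_deactivate(obj, &my_obj_descr);
 *	...
 *	debug_object_free(obj, &my_obj_descr);
 */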

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

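/*
 * Entry point for the memory allocator free paths: scans the freed
 * memory region for tracked objects, complains about those which are
 * still active and drops the stale tracking entries.
 */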
void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	unsigned int cpu, pool_used, pcp_free = 0;

	/*
	 * pool_global.stats.cur_used is the number of objects currently
	 * handed out to per CPU pools in batch sized chunks. Subtract the
	 * number of free objects in the per CPU pools to estimate the
	 * number of objects actually in use. As this is lockless the
	 * numbers are estimates.
	 */
	for_each_possible_cpu(cpu)
		pcp_free += per_cpu(pool_pcpu.cnt, cpu);

	pool_used = READ_ONCE(pool_global.stats.cur_used);
	pcp_free = min(pool_used, pcp_free);
	pool_used -= pcp_free;

	seq_printf(m, "max_chain : %d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked : %d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings : %d\n", debug_objects_warnings);
	seq_printf(m, "fixups : %d\n", debug_objects_fixups);
	seq_printf(m, "pool_free : %u\n", pool_count(&pool_global) + pcp_free);
	seq_printf(m, "pool_pcp_free : %u\n", pcp_free);
	seq_printf(m, "pool_min_free : %u\n", data_race(pool_global.stats.min_fill));
	seq_printf(m, "pool_used : %u\n", pool_used);
	seq_printf(m, "pool_max_used : %u\n", data_race(pool_global.stats.max_used));
	seq_printf(m, "on_free_list : %u\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated: %d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed : %d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the boot pool list. After this
 * call the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Keep early boot simple and add everything to the boot list */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
		if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
			goto free;
		pool_push_batch(&pool_global, &objects);
	}

	/* Disconnect the boot pool. */
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			struct debug_obj *new = pcpu_alloc();

			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	pool_global.max_cnt += extras;
	pool_global.min_cnt += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;
	static_branch_enable(&obj_cache_enabled);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}