1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Generic infrastructure for lifetime debugging of objects.
4 *
5 * Copyright (C) 2008, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
6 */
7
8 #define pr_fmt(fmt) "ODEBUG: " fmt
9
10 #include <linux/cpu.h>
11 #include <linux/debugobjects.h>
12 #include <linux/debugfs.h>
13 #include <linux/hash.h>
14 #include <linux/kmemleak.h>
15 #include <linux/sched.h>
16 #include <linux/sched/loadavg.h>
17 #include <linux/sched/task_stack.h>
18 #include <linux/seq_file.h>
19 #include <linux/slab.h>
20 #include <linux/static_key.h>
21
/* Hash table: 2^14 buckets, indexed by the chunk part of the address */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Must be power of two */
#define ODEBUG_BATCH_SIZE	16

/* Initial values. Must all be a multiple of batch size */
#define ODEBUG_POOL_SIZE	(64 * ODEBUG_BATCH_SIZE)
#define ODEBUG_POOL_MIN_LEVEL	(ODEBUG_POOL_SIZE / 4)

#define ODEBUG_POOL_PERCPU_SIZE	(8 * ODEBUG_BATCH_SIZE)

/* Objects are hashed by the page-sized chunk their address falls into */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it is freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
45
/* One hash bucket: object chain plus the lock protecting it */
struct debug_bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};
50
/*
 * Usage statistics for the global pool:
 * @cur_used:	objects currently handed out to per CPU pools, updated in
 *		ODEBUG_BATCH_SIZE granularity
 * @max_used:	high watermark of @cur_used
 * @min_fill:	low watermark of pool_global.cnt
 */
struct pool_stats {
	unsigned int cur_used;
	unsigned int max_used;
	unsigned int min_fill;
};
56
/*
 * A pool of debug objects. @cnt is the current fill level, @min_cnt the
 * refill threshold and @max_cnt the capacity limit. Cache line aligned
 * as it is also instantiated per CPU (pool_pcpu).
 */
struct obj_pool {
	struct hlist_head objects;
	unsigned int cnt;
	unsigned int min_cnt;
	unsigned int max_cnt;
	struct pool_stats stats;
} ____cacheline_aligned;
64
65
/* Per CPU object pool; refilled from / drained to the global pools */
static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
	.max_cnt = ODEBUG_POOL_PERCPU_SIZE,
};

static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

/* Static objects for use before the slab allocator is available */
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects pool_global, pool_to_free and the usage statistics */
static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool pool_global = {
	.min_cnt = ODEBUG_POOL_MIN_LEVEL,
	.max_cnt = ODEBUG_POOL_SIZE,
	.stats = {
		.min_fill = ODEBUG_POOL_SIZE,
	},
};

/* Objects queued for kmem_cache_free() by the delayed worker */
static struct obj_pool pool_to_free = {
	.max_cnt = UINT_MAX,
};

/* Boot time object list; presumably seeded from obj_static_pool during early init — TODO confirm */
static HLIST_HEAD(pool_boot);

/* Decaying average of pool usage, maintained by calc_usage() */
static unsigned long avg_usage;
/* True while an invocation of the delayed free worker is pending */
static bool obj_freeing;

static int __data_racy debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly;
static int __data_racy debug_objects_fixups __read_mostly;
static int __data_racy debug_objects_warnings __read_mostly;
static bool __data_racy debug_objects_enabled __read_mostly
= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Descriptor whose objects are exempted from debug_print_object() reports */
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache *obj_cache __ro_after_init;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int __data_racy debug_objects_allocated;
static int __data_racy debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

/* Switches allocation from pool_boot to the per CPU / global pools */
static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);
113
/* Boot parameter "debug_objects": force-enable object debugging */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);
120
/* Boot parameter "no_debug_objects": force-disable object debugging */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);
127
/* Human readable state names for the warnings in debug_print_object() */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE] = "none",
	[ODEBUG_STATE_INIT] = "initialized",
	[ODEBUG_STATE_INACTIVE] = "inactive",
	[ODEBUG_STATE_ACTIVE] = "active",
	[ODEBUG_STATE_DESTROYED] = "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE] = "not available",
};
136
/*
 * Lockless read of the pool fill level. Pairs with the WRITE_ONCE()
 * updates done under pool_lock in the pool_*_batch() helpers.
 */
static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}
141
/* True when the pool dropped below its refill threshold */
static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt;
}
146
/* True when the pool dropped below half of its refill threshold */
static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt / 2;
}
151
/*
 * Move one batch of ODEBUG_BATCH_SIZE objects from the head of @src to
 * the head of @dst. Relies on the head object of each batch caching the
 * batch's last node in ->batch_last. Must be called with pool_lock held.
 * Returns false when @dst is full or @src is empty.
 */
static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
	struct hlist_node *last, *next_batch, *first_batch;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt || !src->cnt)
		return false;

	first_batch = src->objects.first;
	obj = hlist_entry(first_batch, typeof(*obj), node);
	last = obj->batch_last;
	next_batch = last->next;

	/* Move the next batch to the front of the source pool */
	src->objects.first = next_batch;
	if (next_batch)
		next_batch->pprev = &src->objects.first;

	/* Add the extracted batch to the destination pool */
	last->next = dst->objects.first;
	if (last->next)
		last->next->pprev = &last->next;
	first_batch->pprev = &dst->objects.first;
	dst->objects.first = first_batch;

	/* WRITE_ONCE() pairs with the lockless read in pool_count() */
	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}
181
/*
 * Splice a freshly built batch (@head, as produced by kmem_alloc_batch())
 * onto @dst. The cnt update assumes @head holds exactly ODEBUG_BATCH_SIZE
 * objects. Must be called with pool_lock held. Returns false when @dst
 * is full.
 */
static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
{
	struct hlist_node *last;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt)
		return false;

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;

	hlist_splice_init(head, last, &dst->objects);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}
197
/*
 * Detach the first batch of @src onto @head. Must be called with
 * pool_lock held. Returns false when @src is empty.
 */
static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
{
	struct hlist_node *last, *next;
	struct debug_obj *obj;

	if (!src->cnt)
		return false;

	/* Move the complete list to the head */
	hlist_move_list(&src->objects, head);

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;
	next = last->next;
	/* Disconnect the batch from the list */
	last->next = NULL;

	/* Move the node after last back to the source pool. */
	src->objects.first = next;
	if (next)
		next->pprev = &src->objects.first;

	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	return true;
}
223
__alloc_object(struct hlist_head * list)224 static struct debug_obj *__alloc_object(struct hlist_head *list)
225 {
226 struct debug_obj *obj;
227
228 if (unlikely(!list->first))
229 return NULL;
230
231 obj = hlist_entry(list->first, typeof(*obj), node);
232 hlist_del(&obj->node);
233 return obj;
234 }
235
/*
 * Account one batch moved into a per CPU pool and update the watermarks.
 * Must be called with pool_lock held. cur_used is written with
 * WRITE_ONCE() as it is read locklessly in calc_usage().
 */
static void pcpu_refill_stats(void)
{
	struct pool_stats *stats = &pool_global.stats;

	WRITE_ONCE(stats->cur_used, stats->cur_used + ODEBUG_BATCH_SIZE);

	if (stats->cur_used > stats->max_used)
		stats->max_used = stats->cur_used;

	if (pool_global.cnt < stats->min_fill)
		stats->min_fill = pool_global.cnt;
}
248
/*
 * Allocate an object from the per CPU pool. When the pool runs empty,
 * batches are pulled in from pool_to_free or pool_global under
 * pool_lock and the allocation is retried. Returns NULL when all pools
 * are exhausted.
 */
static struct debug_obj *pcpu_alloc(void)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	for (;;) {
		struct debug_obj *obj = __alloc_object(&pcp->objects);

		if (likely(obj)) {
			pcp->cnt--;
			/*
			 * If this emptied a batch try to refill from the
			 * free pool. Don't do that if this was the top-most
			 * batch as pcpu_free() expects the per CPU pool
			 * to be less than ODEBUG_POOL_PERCPU_SIZE.
			 */
			if (unlikely(pcp->cnt < (ODEBUG_POOL_PERCPU_SIZE - ODEBUG_BATCH_SIZE) &&
				     !(pcp->cnt % ODEBUG_BATCH_SIZE))) {
				/*
				 * Don't try to allocate from the regular pool here
				 * to not exhaust it prematurely.
				 */
				if (pool_count(&pool_to_free)) {
					guard(raw_spinlock)(&pool_lock);
					pool_move_batch(pcp, &pool_to_free);
					pcpu_refill_stats();
				}
			}
			return obj;
		}

		/* Pool empty. Pull in a batch and retry. Prefer pool_to_free. */
		guard(raw_spinlock)(&pool_lock);
		if (!pool_move_batch(pcp, &pool_to_free)) {
			if (!pool_move_batch(pcp, &pool_global))
				return NULL;
		}
		pcpu_refill_stats();
	}
}
289
/*
 * Return an object to the per CPU pool, maintaining the batch linkage
 * via ->batch_last. When the pool reaches ODEBUG_POOL_PERCPU_SIZE one
 * batch is moved back to pool_global, or to pool_to_free if the global
 * pool is full.
 */
static void pcpu_free(struct debug_obj *obj)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
	struct debug_obj *first;

	lockdep_assert_irqs_disabled();

	if (!(pcp->cnt % ODEBUG_BATCH_SIZE)) {
		/* This object opens a new batch and is its own tail */
		obj->batch_last = &obj->node;
	} else {
		/* Inherit the batch tail from the current batch head */
		first = hlist_entry(pcp->objects.first, typeof(*first), node);
		obj->batch_last = first->batch_last;
	}
	hlist_add_head(&obj->node, &pcp->objects);
	pcp->cnt++;

	/* Pool full ? */
	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
		return;

	/* Remove a batch from the per CPU pool */
	guard(raw_spinlock)(&pool_lock);
	/* Try to fit the batch into the pool_global first */
	if (!pool_move_batch(&pool_global, pcp))
		pool_move_batch(&pool_to_free, pcp);
	/* One batch less in use. Pairs with pcpu_refill_stats(). */
	WRITE_ONCE(pool_global.stats.cur_used, pool_global.stats.cur_used - ODEBUG_BATCH_SIZE);
}
317
free_object_list(struct hlist_head * head)318 static void free_object_list(struct hlist_head *head)
319 {
320 struct hlist_node *tmp;
321 struct debug_obj *obj;
322 int cnt = 0;
323
324 hlist_for_each_entry_safe(obj, tmp, head, node) {
325 hlist_del(&obj->node);
326 kmem_cache_free(obj_cache, obj);
327 cnt++;
328 }
329 debug_objects_freed += cnt;
330 }
331
/*
 * Refill pool_global with batches recycled from pool_to_free. A single
 * bit in @state serializes concurrent refillers; contenders simply
 * return as one refill is sufficient.
 */
static void fill_pool_from_freelist(void)
{
	static unsigned long state;

	/*
	 * Reuse objs from the global obj_to_free list; they will be
	 * reinitialized when allocating.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag;
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	/* Avoid taking the lock when there is no work to do */
	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
		guard(raw_spinlock)(&pool_lock);
		/* Move a batch if possible */
		pool_move_batch(&pool_global, &pool_to_free);
	}
	clear_bit(0, &state);
}
364
/*
 * Allocate one batch of ODEBUG_BATCH_SIZE objects from @cache onto
 * @head. The first allocated object ends up as the list tail (objects
 * are head-inserted) and every object caches its node in ->batch_last,
 * so the batch can later be moved as a unit. On allocation failure the
 * partial batch is freed again and false is returned.
 */
static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
{
	struct hlist_node *last = NULL;
	struct debug_obj *obj;

	for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
		obj = kmem_cache_zalloc(cache, gfp);
		if (!obj) {
			free_object_list(head);
			return false;
		}
		debug_objects_allocated++;

		if (!last)
			last = &obj->node;
		obj->batch_last = last;

		hlist_add_head(&obj->node, head);
	}
	return true;
}
386
/*
 * Refill pool_global with freshly allocated batches until it reaches
 * its refill threshold. Concurrent refillers back off unless the pool
 * hit the critical (must refill) level.
 */
static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *   - One other CPU is already allocating
	 *   - the global pool has not reached the critical level yet
	 */
	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_should_refill(&pool_global)) {
		gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
		HLIST_HEAD(head);

		/*
		 * Allow reclaim only in preemptible context and during
		 * early boot. If not preemptible, the caller might hold
		 * locks causing a deadlock in the allocator.
		 *
		 * If the reclaim flag is not set during early boot then
		 * allocations, which happen before deferred page
		 * initialization has completed, will fail.
		 *
		 * In preemptible context the flag is harmless and not a
		 * performance issue as that's usually invoked from slow
		 * path initialization context.
		 */
		if (preemptible() || system_state < SYSTEM_SCHEDULING)
			gfp |= __GFP_KSWAPD_RECLAIM;

		if (!kmem_alloc_batch(&head, obj_cache, gfp))
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		/* Park overflow batches in pool_to_free for later reuse */
		if (!pool_push_batch(&pool_global, &head))
			pool_push_batch(&pool_to_free, &head);
	}
	atomic_dec(&cpus_allocating);
}
429
430 /*
431 * Lookup an object in the hash bucket.
432 */
lookup_object(void * addr,struct debug_bucket * b)433 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
434 {
435 struct debug_obj *obj;
436 int cnt = 0;
437
438 hlist_for_each_entry(obj, &b->list, node) {
439 cnt++;
440 if (obj->object == addr)
441 return obj;
442 }
443 if (cnt > debug_objects_maxchain)
444 debug_objects_maxchain = cnt;
445
446 return NULL;
447 }
448
/*
 * Update the decaying usage average (avg_usage) at most once per 10ms.
 * Contenders skip the update via trylock; the next caller catches up.
 */
static void calc_usage(void)
{
	static DEFINE_RAW_SPINLOCK(avg_lock);
	static unsigned long avg_period;
	unsigned long cur, now = jiffies;

	if (!time_after_eq(now, READ_ONCE(avg_period)))
		return;

	if (!raw_spin_trylock(&avg_lock))
		return;

	WRITE_ONCE(avg_period, now + msecs_to_jiffies(10));
	/* Scale usage so free_obj_work() can derive a per-batch rate */
	cur = READ_ONCE(pool_global.stats.cur_used) * ODEBUG_FREE_WORK_MAX;
	WRITE_ONCE(avg_usage, calc_load(avg_usage, EXP_5, cur));
	raw_spin_unlock(&avg_lock);
}
466
/*
 * Allocate a tracking object for @addr, initialize it for @descr and
 * add it to hash bucket @b. Caller holds b->lock (required for the
 * hlist_add_head() into the bucket). Returns NULL when the pools are
 * exhausted.
 */
static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
				      const struct debug_obj_descr *descr)
{
	struct debug_obj *obj;

	calc_usage();

	if (static_branch_likely(&obj_cache_enabled))
		obj = pcpu_alloc();
	else
		obj = __alloc_object(&pool_boot);

	if (likely(obj)) {
		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}
488
/*
 * Workqueue function to free objects. The number of batches actually
 * freed is rate limited based on the trend of the usage average: only
 * free when usage is flat or shrinking, and refill pool_global first.
 */
static void free_obj_work(struct work_struct *work)
{
	static unsigned long last_use_avg;
	unsigned long cur_used, last_used, delta;
	unsigned int max_free = 0;

	WRITE_ONCE(obj_freeing, false);

	/* Rate limit freeing based on current use average */
	cur_used = READ_ONCE(avg_usage);
	last_used = last_use_avg;
	last_use_avg = cur_used;

	if (!pool_count(&pool_to_free))
		return;

	/* Usage not growing: allow freeing proportional to the decline */
	if (cur_used <= last_used) {
		delta = (last_used - cur_used) / ODEBUG_FREE_WORK_MAX;
		max_free = min(delta, ODEBUG_FREE_WORK_MAX);
	}

	for (int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
		HLIST_HEAD(tofree);

		/* Acquire and drop the lock for each batch */
		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
			if (!pool_to_free.cnt)
				return;

			/* Refill the global pool if possible */
			if (pool_move_batch(&pool_global, &pool_to_free)) {
				/* Don't free as there seems to be demand */
				max_free = 0;
			} else if (max_free) {
				pool_pop_batch(&tofree, &pool_to_free);
				max_free--;
			} else {
				return;
			}
		}
		/* kmem_cache_free() outside of pool_lock */
		free_object_list(&tofree);
	}
}
533
/*
 * Put @obj back into the per CPU pool, or onto the boot list before the
 * object cache is enabled. Interrupts are disabled for the duration as
 * pcpu_free() requires it.
 */
static void __free_object(struct debug_obj *obj)
{
	guard(irqsave)();
	if (static_branch_likely(&obj_cache_enabled))
		pcpu_free(obj);
	else
		hlist_add_head(&obj->node, &pool_boot);
}
542
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	/* Kick the worker at most once until it clears obj_freeing */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}
555
/* Return every object on @list to the pools via free_object(). */
static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}
570
571 #ifdef CONFIG_HOTPLUG_CPU
/* CPU hotplug callback: drain the dead CPU's object pool */
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

	put_objects(&pcp->objects);
	pcp->cnt = 0;
	return 0;
}
581 #endif
582
/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Steal the bucket's chain under its lock ... */
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		/* ... and release the objects outside of it */
		put_objects(&freelist);
	}
}
598
599 /*
600 * We use the pfn of the address for the hash. That way we can check
601 * for freed objects simply by checking the affected bucket.
602 */
get_bucket(unsigned long addr)603 static struct debug_bucket *get_bucket(unsigned long addr)
604 {
605 unsigned long hash;
606
607 hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
608 return &obj_hash[hash];
609 }
610
/*
 * Emit a WARN for an object in an unexpected state. At most 5 warnings
 * are printed (self test objects with descr_test excluded); the
 * warnings counter is incremented unconditionally.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}
636
637 /*
638 * Try to repair the damage, so we have a better chance to get useful
639 * debug output.
640 */
641 static bool
debug_object_fixup(bool (* fixup)(void * addr,enum debug_obj_state state),void * addr,enum debug_obj_state state)642 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
643 void * addr, enum debug_obj_state state)
644 {
645 if (fixup && fixup(addr, state)) {
646 debug_objects_fixups++;
647 return true;
648 }
649 return false;
650 }
651
debug_object_is_on_stack(void * addr,int onstack)652 static void debug_object_is_on_stack(void *addr, int onstack)
653 {
654 int is_on_stack;
655 static int limit;
656
657 if (limit > 4)
658 return;
659
660 is_on_stack = object_is_on_stack(addr);
661 if (is_on_stack == onstack)
662 return;
663
664 limit++;
665 if (is_on_stack)
666 pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
667 task_stack_page(current));
668 else
669 pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
670 task_stack_page(current));
671
672 WARN_ON(1);
673 }
674
/*
 * Look up the tracking object for @addr in bucket @b, allocating one if
 * it does not exist yet. Caller holds b->lock.
 *
 * Returns:
 *  - the existing or newly allocated object on success
 *  - ERR_PTR(-ENOENT) when @alloc_ifstatic is set and @addr is not a
 *    static object
 *  - NULL on allocation failure; debug_objects_enabled is cleared and
 *    the caller is expected to invoke debug_objects_oom()
 */
static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}
713
/*
 * Keep pool_global filled ahead of demand: first recycle from
 * pool_to_free, then fall back to allocating fresh batches when the
 * context permits it.
 */
static void debug_objects_fill_pool(void)
{
	if (!static_branch_likely(&obj_cache_enabled))
		return;

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/* Try reusing objects from obj_to_free_list */
	fill_pool_from_freelist();

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible() || system_state < SYSTEM_SCHEDULING) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to LD_WAIT_CONFIG, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_CONFIG);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}
746
/*
 * Common implementation of debug_object_init() and
 * debug_object_init_on_stack(). @onstack records whether the caller
 * declared the object to live on the stack.
 */
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	/* Snapshot under the lock so the report below is consistent */
	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}
785
/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * For objects which are not on the stack; see
 * debug_object_init_on_stack() for the stack variant.
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
799
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
814
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	/* Preset for the "untracked object" report if lookup fails */
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			/* Invalid activation: snapshot for the report below */
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);
872
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	/* Preset for the "untracked object" report if lookup fails */
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* Deactivation with a pending active state is invalid */
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		/* Snapshot under the lock so the report below is consistent */
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
915
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		/* Invalid: destroying an active or destroyed object */
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Snapshot under the lock so the report below is consistent */
	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
962
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object is invalid; report below */
		break;
	default:
		/* Untrack the object and return it to the pools */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	/* Snapshot under the lock so the report below is consistent */
	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);
1004
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	/* Preset for the "not initialized" report if lookup fails */
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
1041
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	/* Preset for the "untracked object" report if lookup fails */
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Transition only when the current astate matches */
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		/* Snapshot under the lock so the report below is consistent */
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
1084
1085 #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan the memory range [address, address + size) for tracked objects.
 * Objects inside the range are removed from the hash; an object which is
 * still ODEBUG_STATE_ACTIVE is reported and the descriptor's fixup_free()
 * callback is invoked before the scan of that bucket is restarted.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Round the range out to full ODEBUG_CHUNK_SIZE aligned chunks */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Buckets hash multiple chunks; skip objects outside the range */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				/*
				 * Copy the object so it can be reported and
				 * fixed up after dropping the bucket lock.
				 * The fixup may modify the hash list, so the
				 * bucket walk must be restarted.
				 */
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}
1142
debug_check_no_obj_freed(const void * address,unsigned long size)1143 void debug_check_no_obj_freed(const void *address, unsigned long size)
1144 {
1145 if (debug_objects_enabled)
1146 __debug_check_no_obj_freed(address, size);
1147 }
1148 #endif
1149
1150 #ifdef CONFIG_DEBUG_FS
1151
/* Render the tracker statistics for the debugfs "stats" file. */
static int debug_stats_show(struct seq_file *m, void *v)
{
	unsigned int cpu, pool_used, pcp_free = 0;

	/*
	 * pool_global.stats.cur_used is the number of batches currently
	 * handed out to per CPU pools. Convert it to number of objects
	 * and subtract the number of free objects in the per CPU pools.
	 * As this is lockless the number is an estimate.
	 */
	for_each_possible_cpu(cpu)
		pcp_free += per_cpu(pool_pcpu.cnt, cpu);

	pool_used = READ_ONCE(pool_global.stats.cur_used);
	/* Clamp so the lockless estimate cannot underflow pool_used */
	pcp_free = min(pool_used, pcp_free);
	pool_used -= pcp_free;

	seq_printf(m, "max_chain : %d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked : %d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings : %d\n", debug_objects_warnings);
	seq_printf(m, "fixups : %d\n", debug_objects_fixups);
	seq_printf(m, "pool_free : %u\n", pool_count(&pool_global) + pcp_free);
	seq_printf(m, "pool_pcp_free : %u\n", pcp_free);
	seq_printf(m, "pool_min_free : %u\n", data_race(pool_global.stats.min_fill));
	seq_printf(m, "pool_used : %u\n", pool_used);
	seq_printf(m, "pool_max_used : %u\n", data_race(pool_global.stats.max_used));
	seq_printf(m, "on_free_list : %u\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated: %d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed : %d\n", debug_objects_freed);
	return 0;
}
/* Generates debug_stats_open/debug_stats_fops for the file below */
DEFINE_SHOW_ATTRIBUTE(debug_stats);
1184
debug_objects_init_debugfs(void)1185 static int __init debug_objects_init_debugfs(void)
1186 {
1187 struct dentry *dbgdir;
1188
1189 if (!debug_objects_enabled)
1190 return 0;
1191
1192 dbgdir = debugfs_create_dir("debug_objects", NULL);
1193
1194 debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1195
1196 return 0;
1197 }
1198 __initcall(debug_objects_init_debugfs);
1199
1200 #else
/* CONFIG_DEBUG_FS=n: no stats interface, nothing to set up. */
static inline void debug_objects_init_debugfs(void) { }
1202 #endif
1203
1204 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1205
/* Random data structure for the self test */
struct self_test {
	unsigned long dummy1[6];
	int static_init;	/* non-zero marks the object as statically initialized */
	unsigned long dummy2[3];
};
1212
1213 static __initconst const struct debug_obj_descr descr_type_test;
1214
is_static_object(void * addr)1215 static bool __init is_static_object(void *addr)
1216 {
1217 struct self_test *obj = addr;
1218
1219 return obj->static_init;
1220 }
1221
1222 /*
1223 * fixup_init is called when:
1224 * - an active object is initialized
1225 */
fixup_init(void * addr,enum debug_obj_state state)1226 static bool __init fixup_init(void *addr, enum debug_obj_state state)
1227 {
1228 struct self_test *obj = addr;
1229
1230 switch (state) {
1231 case ODEBUG_STATE_ACTIVE:
1232 debug_object_deactivate(obj, &descr_type_test);
1233 debug_object_init(obj, &descr_type_test);
1234 return true;
1235 default:
1236 return false;
1237 }
1238 }
1239
1240 /*
1241 * fixup_activate is called when:
1242 * - an active object is activated
1243 * - an unknown non-static object is activated
1244 */
fixup_activate(void * addr,enum debug_obj_state state)1245 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1246 {
1247 struct self_test *obj = addr;
1248
1249 switch (state) {
1250 case ODEBUG_STATE_NOTAVAILABLE:
1251 return true;
1252 case ODEBUG_STATE_ACTIVE:
1253 debug_object_deactivate(obj, &descr_type_test);
1254 debug_object_activate(obj, &descr_type_test);
1255 return true;
1256
1257 default:
1258 return false;
1259 }
1260 }
1261
1262 /*
1263 * fixup_destroy is called when:
1264 * - an active object is destroyed
1265 */
fixup_destroy(void * addr,enum debug_obj_state state)1266 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1267 {
1268 struct self_test *obj = addr;
1269
1270 switch (state) {
1271 case ODEBUG_STATE_ACTIVE:
1272 debug_object_deactivate(obj, &descr_type_test);
1273 debug_object_destroy(obj, &descr_type_test);
1274 return true;
1275 default:
1276 return false;
1277 }
1278 }
1279
1280 /*
1281 * fixup_free is called when:
1282 * - an active object is freed
1283 */
fixup_free(void * addr,enum debug_obj_state state)1284 static bool __init fixup_free(void *addr, enum debug_obj_state state)
1285 {
1286 struct self_test *obj = addr;
1287
1288 switch (state) {
1289 case ODEBUG_STATE_ACTIVE:
1290 debug_object_deactivate(obj, &descr_type_test);
1291 debug_object_free(obj, &descr_type_test);
1292 return true;
1293 default:
1294 return false;
1295 }
1296 }
1297
/*
 * Verify that the selftest object is in the expected tracker state and
 * that the fixup/warning counters match the expectation. Returns 0 on
 * success, -EINVAL on mismatch; any mismatch disables the tracker.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* ODEBUG_STATE_NONE means the object must NOT be tracked anymore */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	/* A failed selftest check disables the tracker for this boot */
	if (res)
		debug_objects_enabled = false;
	return res;
}
1337
/* Descriptor used only by the selftest; __initconst as it is boot-time only */
static __initconst const struct debug_obj_descr descr_type_test = {
	.name = "selftest",
	.is_static_object = is_static_object,
	.fixup_init = fixup_init,
	.fixup_activate = fixup_activate,
	.fixup_destroy = fixup_destroy,
	.fixup_free = fixup_free,
};
1346
/* Selftest object; static_init is set later to exercise the static path */
static __initdata struct self_test obj = { .static_init = 0 };
1348
/*
 * Exercise the object tracker state machine with a test object and check
 * that warnings and fixups trigger exactly as expected. Runs with
 * interrupts disabled; the global counters are restored on exit. Returns
 * the tracker enable state (check_results() disables it on failure).
 */
static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Regular lifetime: init -> activate must neither warn nor fix up */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Activating an already active object must warn and be fixed up */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* Operations on a destroyed object must warn but stay DESTROYED */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Static objects may be activated without prior init */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Init on an active static object must warn and be fixed up */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Freeing memory which contains an active object must fix it up */
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	/* Restore counters so selftest noise does not skew the statistics */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
1420 #else
/* CONFIG_DEBUG_OBJECTS_SELFTEST=n: nothing to test, report success. */
static inline bool debug_objects_selftest(void) { return true; }
1422 #endif
1423
1424 /*
1425 * Called during early boot to initialize the hash buckets and link
1426 * the static object pool objects into the poll list. After this call
1427 * the object tracker is fully operational.
1428 */
debug_objects_early_init(void)1429 void __init debug_objects_early_init(void)
1430 {
1431 int i;
1432
1433 for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1434 raw_spin_lock_init(&obj_hash[i].lock);
1435
1436 /* Keep early boot simple and add everything to the boot list */
1437 for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1438 hlist_add_head(&obj_static_pool[i].node, &pool_boot);
1439 }
1440
/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	HLIST_HEAD(objects);
	int i;

	/* Fill the global pool with dynamically allocated batches */
	for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
		if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
			goto free;
		pool_push_batch(&pool_global, &objects);
	}

	/* Disconnect the boot pool. */
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			/* Presumably drawn from the pool filled above — TODO confirm pcpu_alloc() cannot fail here */
			struct debug_obj *new = pcpu_alloc();

			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}
1485
1486 /*
1487 * Called after the kmem_caches are functional to setup a dedicated
1488 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1489 * prevents that the debug code is called on kmem_cache_free() for the
1490 * debug tracker objects to avoid recursive calls.
1491 */
debug_objects_mem_init(void)1492 void __init debug_objects_mem_init(void)
1493 {
1494 struct kmem_cache *cache;
1495 int extras;
1496
1497 if (!debug_objects_enabled)
1498 return;
1499
1500 if (!debug_objects_selftest())
1501 return;
1502
1503 cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
1504 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
1505
1506 if (!cache || !debug_objects_replace_static_objects(cache)) {
1507 debug_objects_enabled = false;
1508 pr_warn("Out of memory.\n");
1509 return;
1510 }
1511
1512 /*
1513 * Adjust the thresholds for allocating and freeing objects
1514 * according to the number of possible CPUs available in the
1515 * system.
1516 */
1517 extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1518 pool_global.max_cnt += extras;
1519 pool_global.min_cnt += extras;
1520
1521 /* Everything worked. Expose the cache */
1522 obj_cache = cache;
1523 static_branch_enable(&obj_cache_enabled);
1524
1525 #ifdef CONFIG_HOTPLUG_CPU
1526 cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1527 object_cpu_offline);
1528 #endif
1529 return;
1530 }
1531