/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

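/*
 * Worked example (assuming a common PAGE_SHIFT of 12): ODEBUG_CHUNK_SIZE
 * is 4096 and ODEBUG_CHUNK_MASK is ~0xfff, so an address like 0x12345678
 * belongs to chunk 0x12345 and masks down to 0x12345000. All objects
 * within one such chunk hash to the same bucket (see get_bucket() below).
 */
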
/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation, so at
 * most about 10k debug objects are freed per second. The work delay of
 * DIV_ROUND_UP(HZ, 10) jiffies corresponds to roughly 100ms.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count the objects sitting in the percpu free pools, and
 * obj_pool_used will over-count them by the same amount. Adjustments
 * are made in debug_stats_show(). Both obj_pool_min_free and
 * obj_pool_max_used can therefore be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr	*descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

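/*
 * Sketch of the nominal state transitions driven by the debug_object_*()
 * calls below (derived from the switch statements in this file):
 *
 *	init:       NONE/INIT/INACTIVE -> INIT
 *	activate:   INIT/INACTIVE -> ACTIVE
 *	deactivate: INIT/INACTIVE/ACTIVE (astate == 0) -> INACTIVE
 *	destroy:    NONE/INIT/INACTIVE -> DESTROYED
 *	free:       any state except ACTIVE -> object removed from the hash
 *
 * Operations on an ACTIVE or DESTROYED object outside these paths warn
 * via debug_print_object() and may invoke a fixup callback.
 */
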
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 */
	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			obj_nr_tofree--;
			hlist_add_head(&obj->node, &obj_pool);
			obj_pool_free++;
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			obj_pool_free++;
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		obj_pool_free--;

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				obj_pool_free--;
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy. If the trylock
 * fails, the work simply returns; the next free_object() call will
 * reschedule it.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full; if not, refill it
	 * from the global free list. As it is likely that a workload may be
	 * gearing up to use more and more objects, don't free any of them
	 * until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
		obj_nr_tofree--;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		obj_nr_tofree = 0;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		obj_nr_tofree++;
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			obj_nr_tofree += lookahead_count;
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				obj_pool_free--;
				obj_nr_tofree++;
			}
		}
	} else {
		obj_pool_free++;
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			obj_pool_free += lookahead_count;
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!obj_freeing && obj_nr_tofree) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

/*
 * We ran out of memory. That probably means tons of objects are
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the page-sized chunk number of the address (addr >>
 * ODEBUG_CHUNK_SHIFT) for the hash. That way we can check for freed
 * objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

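/*
 * Illustrative sketch of a debug_hint() callback as used above: a
 * subsystem typically returns a pointer that identifies the object in
 * the warning, e.g. the callback function of a work item or timer.
 * (struct my_obj and my_callback are hypothetical names, not part of
 * this file.)
 *
 *	static void *my_debug_hint(void *addr)
 *	{
 *		return ((struct my_obj *)addr)->my_callback;
 *	}
 */
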
490  * Try to repair the damage, so we have a better chance to get useful
491  * debug output.
492  */
493 static bool
494 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
495 		   void * addr, enum debug_obj_state state)
496 {
497 	if (fixup && fixup(addr, state)) {
498 		debug_objects_fixups++;
499 		return true;
500 	}
501 	return false;
502 }

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	bool check_stack = false;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		check_stack = true;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (check_stack)
		debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

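/*
 * Illustrative usage sketch (hypothetical subsystem; my_descr and the
 * object name are not part of this file). A descriptor only needs a
 * name; all callbacks are optional:
 *
 *	static struct debug_obj_descr my_descr = {
 *		.name = "my_obj",
 *	};
 *
 *	debug_object_init(obj, &my_descr);        (NONE -> INIT)
 *	debug_object_activate(obj, &my_descr);    (INIT -> ACTIVE)
 *	debug_object_deactivate(obj, &my_descr);  (ACTIVE -> INACTIVE)
 *	debug_object_free(obj, &my_descr);        (stop tracking)
 */
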
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

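/*
 * For on-stack objects the caller is expected to deactivate and free
 * the object (debug_object_free()) before leaving the stack frame;
 * otherwise the tracker would keep a reference to a dead stack address.
 */
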
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if a check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We are here when a static object is activated. We let the type
	 * specific code confirm whether this is true or not. If true, we
	 * just make sure that the static object is tracked in the object
	 * tracker. If not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * The object may be statically allocated. Let the type
		 * specific code confirm it; if so, track the static object,
		 * otherwise invoke the fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

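/*
 * Illustrative sketch of expect/next usage (MY_STATE_* values are
 * hypothetical, not part of this file): a subsystem that queues an
 * object exactly once can encode that in astate:
 *
 *	debug_object_active_state(obj, &my_descr, MY_STATE_READY,
 *				  MY_STATE_QUEUED);
 *
 * The call warns unless the object is tracked, ACTIVE, and has
 * astate == expect; on success it advances astate to next.
 */
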
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

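	/*
	 * Example (assuming 4k chunks): freeing 100 bytes that straddle a
	 * page boundary yields chunks == 2, so both affected hash buckets
	 * are scanned below.
	 */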
	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!obj_freeing && obj_nr_tofree) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is called early, when only one CPU is up
	 * and interrupts are disabled, so it is safe to replace the active
	 * object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but is done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof(struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
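	/*
	 * E.g. with 8 possible CPUs: extras = 8 * ODEBUG_BATCH_SIZE = 128,
	 * raising the pool size from 1024 to 1152 and the minimum fill
	 * level from 256 to 384.
	 */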
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}