// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation,
 * so at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;
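/*
 * Overview of the object pools (summary added for clarity; the comments
 * on the individual functions below are authoritative):
 *
 *	per-CPU pool (percpu_obj_pool, at most ODEBUG_POOL_PERCPU_SIZE objs,
 *		      accessed with interrupts disabled)
 *	global pool  (obj_pool, protected by pool_lock)
 *	free list    (obj_to_free, protected by pool_lock)
 *
 * Allocation prefers the per-CPU pool and falls back to the global pool,
 * refilling the per-CPU side in batches of ODEBUG_BATCH_SIZE. Freeing
 * goes to the per-CPU pool first and overflows to the global side, where
 * free_obj_work() eventually returns surplus objects to the slab cache.
 */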
/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
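/*
 * Note on fill_pool()'s GFP flags: the refill can run from almost any
 * context, so the allocation uses __GFP_HIGH | __GFP_NOWARN, i.e. it may
 * dip into reserves and stays silent on failure. Failure is harmless
 * here: the pool simply stays below the fill level until a later call
 * succeeds.
 */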
/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}
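/*
 * The look-ahead batching above amortizes a per-CPU pool miss: a single
 * pool_lock acquisition pulls up to 1 + ODEBUG_BATCH_SIZE (17 with the
 * default of 16) objects from the global pool, so the next 16
 * allocations on this CPU are served from the per-CPU pool without
 * touching pool_lock at all.
 */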
/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full or not; if not,
	 * refill the pool list from the global free list. As it is likely
	 * that a workload may be gearing up to use more and more objects,
	 * don't free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That probably means we have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
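/*
 * Worked example (illustrative, assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * objects at 0xffff888012345040 and 0xffff888012345e00 live in the same
 * page, so addr >> ODEBUG_CHUNK_SHIFT is identical and both hash to the
 * same bucket. __debug_check_no_obj_freed() relies on this: to scan a
 * freed memory range it only has to walk the buckets of the pages
 * (chunks) overlapping that range.
 */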
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}
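/*
 * Illustrative sketch of an is_static_object() callback as referenced
 * above (hypothetical "mysubsys" names, not part of this file): a user
 * whose statically initialized objects carry a marker field might
 * provide
 *
 *	static bool mysubsys_is_static_object(void *addr)
 *	{
 *		struct mysubsys_obj *obj = addr;
 *
 *		return obj->static_init;  // set by the static initializer
 *	}
 *
 * The selftest descriptor at the bottom of this file uses exactly this
 * pattern.
 */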
static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
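/*
 * Typical usage (illustrative sketch with hypothetical "mysubsys" names):
 * a subsystem wires the calls exported here into its object lifetime:
 *
 *	static const struct debug_obj_descr mysubsys_debug_descr = {
 *		.name = "mysubsys_obj",
 *	};
 *
 *	void mysubsys_obj_init(struct mysubsys_obj *obj)
 *	{
 *		debug_object_init(obj, &mysubsys_debug_descr);
 *		// ... real initialization ...
 *	}
 *
 *	void mysubsys_obj_start(struct mysubsys_obj *obj)
 *	{
 *		debug_object_activate(obj, &mysubsys_debug_descr);
 *		// ... queue/arm the object ...
 *	}
 *
 * with matching debug_object_deactivate()/debug_object_free() calls on
 * stop and teardown. On-stack instances would use
 * debug_object_init_on_stack() instead of debug_object_init().
 */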
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (likely(!IS_ERR_OR_NULL(obj))) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return 0;
	}

	/* Object is neither static nor tracked. It's not initialized */
	debug_print_object(&o, "activate");
	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
	return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
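/*
 * Overview of the legal state transitions driven by the debug_object_*()
 * calls in this file (summary added for clarity; the switch statements
 * are authoritative):
 *
 *	init:       none/init/inactive   -> init
 *	activate:   init/inactive        -> active
 *	deactivate: init/inactive/active -> inactive (only if astate is 0)
 *	destroy:    none/init/inactive   -> destroyed
 *	free:       any non-active state -> untracked
 *
 * Everything else (e.g. init or destroy of an active object, activate of
 * a destroyed one) is reported via debug_print_object() and, where a
 * fixup callback exists, handed to it for repair.
 */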
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	const struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
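/*
 * Worked example for the chunk math in __debug_check_no_obj_freed()
 * (illustrative, assuming 4K pages): for address = 0x12345010 and
 * size = 0x2000, saddr = 0x12345010, eaddr = 0x12347010 and
 * paddr = 0x12345000. Then chunks = (0x2010 + 0xfff) >> 12 = 3, so the
 * buckets for the pages at 0x12345000, 0x12346000 and 0x12347000 are
 * scanned: exactly those pages that can hold objects inside the freed
 * range.
 */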
#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif
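/*
 * Example: with debugfs mounted at /sys/kernel/debug, the stats are
 * readable at /sys/kernel/debug/debug_objects/stats. Sample output
 * (values are illustrative; field names and format match
 * debug_stats_show() above):
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :3
 *	max_checked   :0
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :1088
 *	pool_pcp_free :51
 *	pool_min_free :789
 *	pool_used     :110
 *	pool_max_used :355
 *	on_free_list  :0
 *	objs_allocated:1152
 *	objs_freed    :64
 */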
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is called so early that only one CPU is up
	 * and interrupts have been disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}
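/*
 * Sizing example (illustrative arithmetic based on the defaults above):
 * on a system with 8 possible CPUs, extras = 8 * ODEBUG_BATCH_SIZE = 128,
 * so debug_objects_pool_size becomes 1024 + 128 = 1152 and
 * debug_objects_pool_min_level becomes 256 + 128 = 384. fill_pool() then
 * refills whenever the global pool drops below 384 objects, and
 * free_obj_work() trims it back once it exceeds 1152.
 */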