/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr	*descr_test  __read_mostly;

/*
 * Track the number of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < debug_objects_pool_min_level) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		kmemleak_ignore(new);
		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		debug_objects_allocated++;
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * Workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy. We also free
 * the objects in a batch of 4 for each lock/unlock cycle.
 */
#define ODEBUG_FREE_BATCH	4

static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *objs[ODEBUG_FREE_BATCH];
	unsigned long flags;
	int i;

	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;
	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
			objs[i] = hlist_entry(obj_pool.first,
					      typeof(*objs[0]), node);
			hlist_del(&objs[i]->node);
		}

		obj_pool_free -= ODEBUG_FREE_BATCH;
		debug_objects_freed += ODEBUG_FREE_BATCH;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		for (i = 0; i < ODEBUG_FREE_BATCH; i++)
			kmem_cache_free(obj_cache, objs[i]);
		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
			return;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * Schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > debug_objects_pool_size && obj_cache)
		sched = 1;
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}
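
/*
 * Worked example of the pool hysteresis with the default tunables
 * (before the per-CPU adjustment done in debug_objects_mem_init()):
 * fill_pool() refills whenever obj_pool_free drops below
 * ODEBUG_POOL_MIN_LEVEL (256), free_object() schedules the worker once
 * obj_pool_free exceeds debug_objects_pool_size (1024), and
 * free_obj_work() then trims ODEBUG_FREE_BATCH (4) objects per
 * lock/unlock cycle until obj_pool_free falls below 1024 + 4. The gap
 * between the 256 low and 1024 high watermark keeps the refill and trim
 * paths from fighting each other under a bursty alloc/free load.
 */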

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the page-sized chunk of the address (addr >> ODEBUG_CHUNK_SHIFT)
 * for the hash. That way we can check for freed objects simply by
 * checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object is on stack, but not annotated\n");
	else
		pr_warn("object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
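
/*
 * Illustrative usage (not part of this file): a subsystem that wants
 * its objects tracked registers a descriptor and wraps the init paths.
 * "struct foo", "foo_debug_descr" and the functions below are
 * hypothetical stand-ins for whatever the user defines; see the
 * debugobjects documentation for the real in-tree users.
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name	= "foo",
 *	};
 *
 *	void foo_init(struct foo *obj)
 *	{
 *		debug_object_init(obj, &foo_debug_descr);
 *		...
 *	}
 *
 *	void foo_init_on_stack(struct foo *obj)
 *	{
 *		// For automatic variables, so the on-stack check in
 *		// debug_object_is_on_stack() does not warn:
 *		debug_object_init_on_stack(obj, &foo_debug_descr);
 *		...
 *	}
 */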

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We let the
	 * type specific code confirm whether this is true or not. If
	 * true, we just make sure that the static object is tracked in
	 * the object tracker. If not, this must be a bug, so we try to
	 * fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
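
/*
 * Illustrative caller (hypothetical, continuing the foo example above):
 * the return value lets a user refuse to start an object whose tracked
 * state is broken.
 *
 *	int foo_start(struct foo *obj)
 *	{
 *		if (debug_object_activate(obj, &foo_debug_descr))
 *			return -EINVAL;	// check failed, do not start
 *		...
 *		return 0;
 *	}
 */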

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
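
/*
 * Illustrative teardown path (hypothetical, matching the foo example
 * above): deactivate when the object stops being used, destroy it if
 * the memory lives on, or drop the tracker entry with
 * debug_object_free() right before the memory itself goes away.
 * Objects set up with debug_object_init_on_stack() must likewise be
 * removed with debug_object_free() before the stack frame is left.
 *
 *	void foo_stop(struct foo *obj)
 *	{
 *		...
 *		debug_object_deactivate(obj, &foo_debug_descr);
 *	}
 *
 *	void foo_release(struct foo *obj)
 *	{
 *		debug_object_free(obj, &foo_debug_descr);
 *		kfree(obj);
 *	}
 */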

/**
 * debug_object_assert_init - debug checks when an object should be
 *			      initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else
		 * invoke the fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
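
/*
 * Illustrative use of the secondary "active" state machine
 * (hypothetical names, loosely modeled on the debug_rcu_head_queue()/
 * debug_rcu_head_unqueue() pattern in the RCU code): an object that
 * must alternate strictly between two phases while it stays
 * ODEBUG_STATE_ACTIVE.
 *
 *	#define FOO_PHASE_IDLE		0
 *	#define FOO_PHASE_QUEUED	1
 *
 *	void foo_queue(struct foo *obj)
 *	{
 *		debug_object_active_state(obj, &foo_debug_descr,
 *					  FOO_PHASE_IDLE, FOO_PHASE_QUEUED);
 *		...
 *	}
 *
 *	void foo_unqueue(struct foo *obj)
 *	{
 *		debug_object_active_state(obj, &foo_debug_descr,
 *					  FOO_PHASE_QUEUED, FOO_PHASE_IDLE);
 *		...
 *	}
 */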

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif
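
/*
 * With debugfs mounted at the usual place, the counters above are
 * readable at /sys/kernel/debug/debug_objects/stats, e.g.:
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *
 * The output is one "name :value" line per counter, exactly as printed
 * by debug_stats_show() above; the values depend on the running system.
 */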

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
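
/*
 * Boot-time wiring (illustrative sketch; init/main.c is authoritative
 * for the exact call sites): both entry points are invoked from
 * start_kernel(), debug_objects_early_init() before any tracked object
 * can exist and debug_objects_mem_init() once the slab allocator is up.
 *
 *	asmlinkage __visible void __init start_kernel(void)
 *	{
 *		...
 *		debug_objects_early_init();
 *		...
 *		mm_init();
 *		...
 *		debug_objects_mem_init();
 *		...
 *	}
 */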

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		kmemleak_ignore(obj);
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	debug_objects_pool_size += num_possible_cpus() * 32;
	debug_objects_pool_min_level += num_possible_cpus() * 4;
}