/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
static struct kmem_cache *obj_cache;

static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly
	= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr *descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

/*
 * Refill the object pool up to ODEBUG_POOL_MIN_LEVEL. The allocations
 * must not sleep, as this can be called from any context.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated. Disable the debugger and return all tracked objects to
 * the pool.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
/*
 * We use the chunk number of the address (addr >> ODEBUG_CHUNK_SHIFT)
 * for the hash. That way we can check for freed objects simply by
 * checking the affected bucket(s).
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s\n",
			msg, obj_states[obj->state], obj->astate,
			obj->descr->name);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static void
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup)
		debug_objects_fixups += fixup(addr, state);
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
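/*
 * A minimal usage sketch (illustration only, not part of this file).
 * "struct foo" and the foo_* names are hypothetical. A subsystem wires
 * a debug_obj_descr to its object type and calls the debug_object_*
 * hooks from its own code paths. A fixup callback returns 1 when it
 * repaired the situation, which feeds the fixups statistics counter:
 *
 *	static struct debug_obj_descr foo_debug_descr;
 *
 *	static int foo_fixup_free(void *addr, enum debug_obj_state state)
 *	{
 *		struct foo *f = addr;
 *
 *		if (state == ODEBUG_STATE_ACTIVE) {
 *			foo_shutdown(f);
 *			debug_object_free(f, &foo_debug_descr);
 *			return 1;
 *		}
 *		return 0;
 *	}
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name		= "foo",
 *		.fixup_free	= foo_fixup_free,
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 */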
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We land here when a statically initialized object is
	 * activated without ever having been tracked. We let the
	 * type specific fixup code decide whether this is legitimate.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
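/*
 * Teardown sketch (illustration only; f and foo_debug_descr are the
 * hypothetical names from the sketch further above). The expected call
 * sequence on the way to freeing a tracked object is:
 *
 *	debug_object_deactivate(f, &foo_debug_descr);	ACTIVE -> INACTIVE
 *	debug_object_destroy(f, &foo_debug_descr);	-> DESTROYED (optional)
 *	debug_object_free(f, &foo_debug_descr);	tracking entry dropped
 *
 * Destroying or freeing an object which is still active invokes the
 * corresponding fixup callback instead.
 */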
/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
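/*
 * A sketch of the astate pattern above (illustration only; the FOO_*
 * constants are hypothetical). A subsystem encodes sub-states of an
 * active object in obj->astate and lets the checker enforce the legal
 * transitions. Note that debug_object_deactivate() only succeeds once
 * astate has returned to 0:
 *
 *	enum { FOO_IDLE, FOO_IN_FLIGHT };
 *
 *	FOO_IDLE -> FOO_IN_FLIGHT, legal only on an idle active object:
 *		debug_object_active_state(f, &foo_debug_descr,
 *					  FOO_IDLE, FOO_IN_FLIGHT);
 *
 *	FOO_IN_FLIGHT -> FOO_IDLE, when the operation has completed:
 *		debug_object_active_state(f, &foo_debug_descr,
 *					  FOO_IN_FLIGHT, FOO_IDLE);
 */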
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif
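/*
 * With CONFIG_DEBUG_FS enabled, the statistics above are exported via
 * /sys/kernel/debug/debug_objects/stats (assuming debugfs is mounted
 * in the usual place). The output has the form shown below; the values
 * are illustrative:
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :1
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :512
 *	pool_min_free :256
 *	pool_used     :0
 *	pool_max_used :0
 */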
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here! This is
			 * not a fixup of some bad behaviour. We
			 * merely call debug_object_init() to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}
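/*
 * For comparison, a sketch of what a non-selftest fixup_activate could
 * look like (illustration only; "struct foo" with its static_init
 * marker and the foo_* names are hypothetical):
 *
 *	static int foo_fixup_activate(void *addr, enum debug_obj_state state)
 *	{
 *		struct foo *f = addr;
 *
 *		switch (state) {
 *		case ODEBUG_STATE_NOTAVAILABLE:
 *			if (!f->static_init) {
 *				WARN_ON(1);
 *				return 0;
 *			}
 *			debug_object_init(f, &foo_debug_descr);
 *			debug_object_activate(f, &foo_debug_descr);
 *			return 0;
 *
 *		default:
 *			return 0;
 *		}
 *	}
 *
 * Returning 0 for the static-init case follows the advice above: the
 * calls merely start tracking a legitimately activated object, so
 * nothing was "fixed up".
 */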
/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, which would recurse.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}