/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE 512
#define ODEBUG_POOL_MIN_LEVEL 256

#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
        struct hlist_head list;
        spinlock_t lock;
};

static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE];

static DEFINE_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
static struct kmem_cache *obj_cache;

static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly;
static struct debug_obj_descr *descr_test __read_mostly;

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = 1;
        return 0;
}
early_param("debug_objects", enable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE] = "none",
        [ODEBUG_STATE_INIT] = "initialized",
        [ODEBUG_STATE_INACTIVE] = "inactive",
        [ODEBUG_STATE_ACTIVE] = "active",
        [ODEBUG_STATE_DESTROYED] = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE] = "not available",
};

static int fill_pool(void)
{
        gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
        struct debug_obj *new;
        unsigned long flags;

        if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
                return obj_pool_free;

        if (unlikely(!obj_cache))
                return obj_pool_free;

        while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

                new = kmem_cache_zalloc(obj_cache, gfp);
                if (!new)
                        return obj_pool_free;

                spin_lock_irqsave(&pool_lock, flags);
                hlist_add_head(&new->node, &obj_pool);
                obj_pool_free++;
                spin_unlock_irqrestore(&pool_lock, flags);
        }
        return obj_pool_free;
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct hlist_node *node;
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, node, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}
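
/*
 * How a tracked address is resolved (illustrative sketch only; the real
 * hooks further down follow exactly this pattern): the object address
 * selects a hash bucket via get_bucket() (defined below) and the bucket
 * lock serializes all state transitions of the objects hashed to it:
 *
 *      struct debug_bucket *db = get_bucket((unsigned long) addr);
 *      struct debug_obj *obj;
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&db->lock, flags);
 *      obj = lookup_object(addr, db);
 *      // ... inspect or update obj->state ...
 *      spin_unlock_irqrestore(&db->lock, flags);
 */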

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
        struct debug_obj *obj = NULL;

        spin_lock(&pool_lock);
        if (obj_pool.first) {
                obj = hlist_entry(obj_pool.first, typeof(*obj), node);

                obj->object = addr;
                obj->descr = descr;
                obj->state = ODEBUG_STATE_NONE;
                hlist_del(&obj->node);

                hlist_add_head(&obj->node, &b->list);

                obj_pool_used++;
                if (obj_pool_used > obj_pool_max_used)
                        obj_pool_max_used = obj_pool_used;

                obj_pool_free--;
                if (obj_pool_free < obj_pool_min_free)
                        obj_pool_min_free = obj_pool_free;
        }
        spin_unlock(&pool_lock);

        return obj;
}

/*
 * Put the object back into the pool or give it back to kmem_cache:
 */
static void free_object(struct debug_obj *obj)
{
        unsigned long idx = (unsigned long)(obj - obj_static_pool);

        if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
                spin_lock(&pool_lock);
                hlist_add_head(&obj->node, &obj_pool);
                obj_pool_free++;
                obj_pool_used--;
                spin_unlock(&pool_lock);
        } else {
                spin_lock(&pool_lock);
                obj_pool_used--;
                spin_unlock(&pool_lock);
                kmem_cache_free(obj_cache, obj);
        }
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *node, *tmp;
        struct debug_obj *obj;
        unsigned long flags;
        int i;

        printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
                spin_unlock_irqrestore(&db->lock, flags);
        }
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
        static int limit;

        if (limit < 5 && obj->descr != descr_test) {
                limit++;
                printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
                       obj_states[obj->state], obj->descr->name);
                WARN_ON(1);
        }
        debug_objects_warnings++;
}
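
/*
 * Shape of a type specific fixup callback (sketch; struct foo, foo_stop()
 * and foo_debug_descr are hypothetical stand-ins, modelled on the selftest
 * fixups at the bottom of this file): the callback gets the object address
 * and the state which triggered the problem, repairs what it can and
 * returns 1 if it fixed something, 0 otherwise. The return value is
 * accumulated in debug_objects_fixups by debug_object_fixup() below.
 *
 *      static int foo_fixup_free(void *addr, enum debug_obj_state state)
 *      {
 *              struct foo *f = addr;
 *
 *              switch (state) {
 *              case ODEBUG_STATE_ACTIVE:
 *                      foo_stop(f);
 *                      debug_object_free(f, &foo_debug_descr);
 *                      return 1;
 *              default:
 *                      return 0;
 *              }
 *      }
 */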

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static void
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
                   void *addr, enum debug_obj_state state)
{
        if (fixup)
                debug_objects_fixups += fixup(addr, state);
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                printk(KERN_WARNING
                       "ODEBUG: object is on stack, but not annotated\n");
        else
                printk(KERN_WARNING
                       "ODEBUG: object is not on stack, but annotated\n");
        WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        fill_pool();

        db = get_bucket((unsigned long) addr);

        spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                obj = alloc_object(addr, db, descr);
                if (!obj) {
                        debug_objects_enabled = 0;
                        spin_unlock_irqrestore(&db->lock, flags);
                        debug_objects_oom();
                        return;
                }
                debug_object_is_on_stack(addr, onstack);
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                break;

        case ODEBUG_STATE_ACTIVE:
                debug_print_object(obj, "init");
                state = obj->state;
                spin_unlock_irqrestore(&db->lock, flags);
                debug_object_fixup(descr->fixup_init, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                debug_print_object(obj, "init");
                break;
        default:
                break;
        }

        spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 * initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        break;

                case ODEBUG_STATE_ACTIVE:
                        debug_print_object(obj, "activate");
                        state = obj->state;
                        spin_unlock_irqrestore(&db->lock, flags);
                        debug_object_fixup(descr->fixup_activate, addr, state);
                        return;

                case ODEBUG_STATE_DESTROYED:
                        debug_print_object(obj, "activate");
                        break;
                default:
                        break;
                }
                spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        spin_unlock_irqrestore(&db->lock, flags);
        /*
         * This happens when a static object is activated. We
         * let the type specific code decide whether this is
         * really a statically initialized object or not.
         */
        debug_object_fixup(descr->fixup_activate, addr,
                           ODEBUG_STATE_NOTAVAILABLE);
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        obj->state = ODEBUG_STATE_INACTIVE;
                        break;

                case ODEBUG_STATE_DESTROYED:
                        debug_print_object(obj, "deactivate");
                        break;
                default:
                        break;
                }
        } else {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "deactivate");
        }

        spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
                debug_print_object(obj, "destroy");
                state = obj->state;
                spin_unlock_irqrestore(&db->lock, flags);
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                debug_print_object(obj, "destroy");
                break;
        default:
                break;
        }
out_unlock:
        spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                debug_print_object(obj, "free");
                state = obj->state;
                spin_unlock_irqrestore(&db->lock, flags);
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
                hlist_del(&obj->node);
                free_object(obj);
                break;
        }
out_unlock:
        spin_unlock_irqrestore(&db->lock, flags);
}
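
/*
 * Typical wiring of the hooks above into a user (sketch; struct foo and the
 * foo_* helpers are hypothetical stand-ins for a real user such as the
 * timer code, and foo_fixup_free() is the callback sketched above
 * debug_object_fixup()): the user keeps no extra state of its own, it only
 * annotates the lifetime transitions of its objects.
 *
 *      static struct debug_obj_descr foo_debug_descr = {
 *              .name = "foo",
 *              .fixup_free = foo_fixup_free,
 *      };
 *
 *      void foo_init(struct foo *f)
 *      {
 *              debug_object_init(f, &foo_debug_descr);
 *              // set up the real object
 *      }
 *
 *      void foo_start(struct foo *f)
 *      {
 *              debug_object_activate(f, &foo_debug_descr);
 *              // make the object active (queue it, start it, ...)
 *      }
 *
 *      void foo_stop(struct foo *f)
 *      {
 *              // make the object inactive again
 *              debug_object_deactivate(f, &foo_debug_descr);
 *      }
 *
 *      void foo_release(struct foo *f)
 *      {
 *              debug_object_free(f, &foo_debug_descr);
 *              kfree(f);
 *      }
 */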

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        struct hlist_node *node, *tmp;
        struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        int cnt;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;

        for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                debug_print_object(obj, "free");
                                descr = obj->descr;
                                state = obj->state;
                                spin_unlock_irqrestore(&db->lock, flags);
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                free_object(obj);
                                break;
                        }
                }
                spin_unlock_irqrestore(&db->lock, flags);
                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
        seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
        seq_printf(m, "warnings :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups :%d\n", debug_objects_fixups);
        seq_printf(m, "pool_free :%d\n", obj_pool_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used :%d\n", obj_pool_used);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
        return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
        .open = debug_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init debug_objects_init_debugfs(void)
{
        struct dentry *dbgdir, *dbgstats;

        if (!debug_objects_enabled)
                return 0;

        dbgdir = debugfs_create_dir("debug_objects", NULL);
        if (!dbgdir)
                return -ENOMEM;

        dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
                                       &debug_stats_fops);
        if (!dbgstats)
                goto err;

        return 0;

err:
        debugfs_remove(dbgdir);

        return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long dummy1[6];
        int static_init;
        unsigned long dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init
fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                if (obj->static_init == 1) {
                        debug_object_init(obj, &descr_type_test);
                        debug_object_activate(obj, &descr_type_test);
                        /*
                         * Real code should return 0 here! This is
                         * not a fixup of some bad behaviour. We
                         * merely call the debug_object_init function
                         * to keep track of the object.
                         */
                        return 1;
                } else {
                        /* Real code needs to emit a warning here */
                }
                return 0;

        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return 1;

        default:
                return 0;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return 1;
        default:
                return 0;
        }
}

static int
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                printk(KERN_ERR "ODEBUG: selftest object not found\n");
                WARN_ON(1);
                goto out;
        }
        if (obj && obj->state != state) {
                printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                       obj->state, state);
                WARN_ON(1);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                       fixups, debug_objects_fixups);
                WARN_ON(1);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                       warnings, debug_objects_warnings);
                WARN_ON(1);
                goto out;
        }
        res = 0;
out:
        spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = 0;
        return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
        .name = "selftest",
        .fixup_init = fixup_init,
        .fixup_activate = fixup_activate,
        .fixup_destroy = fixup_destroy,
        .fixup_free = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init
debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                spin_lock_init(&obj_hash[i].lock);

        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
        if (!debug_objects_enabled)
                return;

        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof(struct debug_obj), 0,
                                      SLAB_DEBUG_OBJECTS, NULL);

        if (!obj_cache)
                debug_objects_enabled = 0;
        else
                debug_objects_selftest();
}
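
/*
 * Usage note (sketch): this tracker is built with CONFIG_DEBUG_OBJECTS and
 * switched on at boot with the "debug_objects" command line parameter, see
 * enable_object_debug() above. With debugfs mounted at the usual place,
 * the counters printed by debug_stats_show() are then readable as e.g.
 *
 *      # cat /sys/kernel/debug/debug_objects/stats
 *      max_chain :<n>
 *      warnings :<n>
 *      fixups :<n>
 *      pool_free :<n>
 *      pool_min_free :<n>
 *      pool_used :<n>
 *      pool_max_used :<n>
 *
 * The field names match debug_stats_show() above; the values are runtime
 * and workload dependent.
 */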