/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor can the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
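 *
 * In regular operation an allocation first tries the lockless freelist of
 * the current cpu slab. Only when that freelist is exhausted does
 * __slab_alloc() take the slab_lock, and only when a slab has to be taken
 * from or returned to the partial lists is the centralized list_lock needed.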
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG 1
#else
#define SLABDEBUG 0
#endif

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA)

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static enum {
	DOWN,		/* No slab functionality available */
	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
	UP,		/* Everything works but does not show up in sysfs */
	SYSFS		/* Sysfs up */
} slab_state = DOWN;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);

/*
 * Tracking user of a slab.
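 *
 * When SLAB_STORE_USER is set, two of these records are kept in each
 * object's metadata area: one for the most recent allocation and one for
 * the most recent free (see set_track() and print_track() below).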
184 */ 185 struct track { 186 unsigned long addr; /* Called from address */ 187 int cpu; /* Was running on cpu */ 188 int pid; /* Pid context */ 189 unsigned long when; /* When did the operation occur */ 190 }; 191 192 enum track_item { TRACK_ALLOC, TRACK_FREE }; 193 194 #ifdef CONFIG_SLUB_DEBUG 195 static int sysfs_slab_add(struct kmem_cache *); 196 static int sysfs_slab_alias(struct kmem_cache *, const char *); 197 static void sysfs_slab_remove(struct kmem_cache *); 198 199 #else 200 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 201 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 202 { return 0; } 203 static inline void sysfs_slab_remove(struct kmem_cache *s) 204 { 205 kfree(s); 206 } 207 208 #endif 209 210 static inline void stat(struct kmem_cache_cpu *c, enum stat_item si) 211 { 212 #ifdef CONFIG_SLUB_STATS 213 c->stat[si]++; 214 #endif 215 } 216 217 /******************************************************************** 218 * Core slab cache functions 219 *******************************************************************/ 220 221 int slab_is_available(void) 222 { 223 return slab_state >= UP; 224 } 225 226 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 227 { 228 #ifdef CONFIG_NUMA 229 return s->node[node]; 230 #else 231 return &s->local_node; 232 #endif 233 } 234 235 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu) 236 { 237 #ifdef CONFIG_SMP 238 return s->cpu_slab[cpu]; 239 #else 240 return &s->cpu_slab; 241 #endif 242 } 243 244 /* Verify that a pointer has an address that is valid within a slab page */ 245 static inline int check_valid_pointer(struct kmem_cache *s, 246 struct page *page, const void *object) 247 { 248 void *base; 249 250 if (!object) 251 return 1; 252 253 base = page_address(page); 254 if (object < base || object >= base + page->objects * s->size || 255 (object - base) % s->size) { 256 return 0; 257 } 258 259 return 1; 260 } 261 262 /* 263 * Slow version of get and set free pointer. 264 * 265 * This version requires touching the cache lines of kmem_cache which 266 * we avoid to do in the fast alloc free paths. There we obtain the offset 267 * from the page struct. 
268 */ 269 static inline void *get_freepointer(struct kmem_cache *s, void *object) 270 { 271 return *(void **)(object + s->offset); 272 } 273 274 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 275 { 276 *(void **)(object + s->offset) = fp; 277 } 278 279 /* Loop over all objects in a slab */ 280 #define for_each_object(__p, __s, __addr, __objects) \ 281 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ 282 __p += (__s)->size) 283 284 /* Scan freelist */ 285 #define for_each_free_object(__p, __s, __free) \ 286 for (__p = (__free); __p; __p = get_freepointer((__s), __p)) 287 288 /* Determine object index from a given position */ 289 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 290 { 291 return (p - addr) / s->size; 292 } 293 294 static inline struct kmem_cache_order_objects oo_make(int order, 295 unsigned long size) 296 { 297 struct kmem_cache_order_objects x = { 298 (order << OO_SHIFT) + (PAGE_SIZE << order) / size 299 }; 300 301 return x; 302 } 303 304 static inline int oo_order(struct kmem_cache_order_objects x) 305 { 306 return x.x >> OO_SHIFT; 307 } 308 309 static inline int oo_objects(struct kmem_cache_order_objects x) 310 { 311 return x.x & OO_MASK; 312 } 313 314 #ifdef CONFIG_SLUB_DEBUG 315 /* 316 * Debug settings: 317 */ 318 #ifdef CONFIG_SLUB_DEBUG_ON 319 static int slub_debug = DEBUG_DEFAULT_FLAGS; 320 #else 321 static int slub_debug; 322 #endif 323 324 static char *slub_debug_slabs; 325 326 /* 327 * Object debugging 328 */ 329 static void print_section(char *text, u8 *addr, unsigned int length) 330 { 331 int i, offset; 332 int newline = 1; 333 char ascii[17]; 334 335 ascii[16] = 0; 336 337 for (i = 0; i < length; i++) { 338 if (newline) { 339 printk(KERN_ERR "%8s 0x%p: ", text, addr + i); 340 newline = 0; 341 } 342 printk(KERN_CONT " %02x", addr[i]); 343 offset = i % 16; 344 ascii[offset] = isgraph(addr[i]) ? 
addr[i] : '.'; 345 if (offset == 15) { 346 printk(KERN_CONT " %s\n", ascii); 347 newline = 1; 348 } 349 } 350 if (!newline) { 351 i %= 16; 352 while (i < 16) { 353 printk(KERN_CONT " "); 354 ascii[i] = ' '; 355 i++; 356 } 357 printk(KERN_CONT " %s\n", ascii); 358 } 359 } 360 361 static struct track *get_track(struct kmem_cache *s, void *object, 362 enum track_item alloc) 363 { 364 struct track *p; 365 366 if (s->offset) 367 p = object + s->offset + sizeof(void *); 368 else 369 p = object + s->inuse; 370 371 return p + alloc; 372 } 373 374 static void set_track(struct kmem_cache *s, void *object, 375 enum track_item alloc, unsigned long addr) 376 { 377 struct track *p = get_track(s, object, alloc); 378 379 if (addr) { 380 p->addr = addr; 381 p->cpu = smp_processor_id(); 382 p->pid = current->pid; 383 p->when = jiffies; 384 } else 385 memset(p, 0, sizeof(struct track)); 386 } 387 388 static void init_tracking(struct kmem_cache *s, void *object) 389 { 390 if (!(s->flags & SLAB_STORE_USER)) 391 return; 392 393 set_track(s, object, TRACK_FREE, 0UL); 394 set_track(s, object, TRACK_ALLOC, 0UL); 395 } 396 397 static void print_track(const char *s, struct track *t) 398 { 399 if (!t->addr) 400 return; 401 402 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 403 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); 404 } 405 406 static void print_tracking(struct kmem_cache *s, void *object) 407 { 408 if (!(s->flags & SLAB_STORE_USER)) 409 return; 410 411 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); 412 print_track("Freed", get_track(s, object, TRACK_FREE)); 413 } 414 415 static void print_page_info(struct page *page) 416 { 417 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", 418 page, page->objects, page->inuse, page->freelist, page->flags); 419 420 } 421 422 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 423 { 424 va_list args; 425 char buf[100]; 426 427 va_start(args, fmt); 428 vsnprintf(buf, sizeof(buf), fmt, args); 429 va_end(args); 430 printk(KERN_ERR "========================================" 431 "=====================================\n"); 432 printk(KERN_ERR "BUG %s: %s\n", s->name, buf); 433 printk(KERN_ERR "----------------------------------------" 434 "-------------------------------------\n\n"); 435 } 436 437 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 
438 { 439 va_list args; 440 char buf[100]; 441 442 va_start(args, fmt); 443 vsnprintf(buf, sizeof(buf), fmt, args); 444 va_end(args); 445 printk(KERN_ERR "FIX %s: %s\n", s->name, buf); 446 } 447 448 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 449 { 450 unsigned int off; /* Offset of last byte */ 451 u8 *addr = page_address(page); 452 453 print_tracking(s, p); 454 455 print_page_info(page); 456 457 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", 458 p, p - addr, get_freepointer(s, p)); 459 460 if (p > addr + 16) 461 print_section("Bytes b4", p - 16, 16); 462 463 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE)); 464 465 if (s->flags & SLAB_RED_ZONE) 466 print_section("Redzone", p + s->objsize, 467 s->inuse - s->objsize); 468 469 if (s->offset) 470 off = s->offset + sizeof(void *); 471 else 472 off = s->inuse; 473 474 if (s->flags & SLAB_STORE_USER) 475 off += 2 * sizeof(struct track); 476 477 if (off != s->size) 478 /* Beginning of the filler is the free pointer */ 479 print_section("Padding", p + off, s->size - off); 480 481 dump_stack(); 482 } 483 484 static void object_err(struct kmem_cache *s, struct page *page, 485 u8 *object, char *reason) 486 { 487 slab_bug(s, "%s", reason); 488 print_trailer(s, page, object); 489 } 490 491 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 492 { 493 va_list args; 494 char buf[100]; 495 496 va_start(args, fmt); 497 vsnprintf(buf, sizeof(buf), fmt, args); 498 va_end(args); 499 slab_bug(s, "%s", buf); 500 print_page_info(page); 501 dump_stack(); 502 } 503 504 static void init_object(struct kmem_cache *s, void *object, int active) 505 { 506 u8 *p = object; 507 508 if (s->flags & __OBJECT_POISON) { 509 memset(p, POISON_FREE, s->objsize - 1); 510 p[s->objsize - 1] = POISON_END; 511 } 512 513 if (s->flags & SLAB_RED_ZONE) 514 memset(p + s->objsize, 515 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, 516 s->inuse - s->objsize); 517 } 518 519 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) 520 { 521 while (bytes) { 522 if (*start != (u8)value) 523 return start; 524 start++; 525 bytes--; 526 } 527 return NULL; 528 } 529 530 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 531 void *from, void *to) 532 { 533 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); 534 memset(from, data, to - from); 535 } 536 537 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 538 u8 *object, char *what, 539 u8 *start, unsigned int value, unsigned int bytes) 540 { 541 u8 *fault; 542 u8 *end; 543 544 fault = check_bytes(start, value, bytes); 545 if (!fault) 546 return 1; 547 548 end = start + bytes; 549 while (end > fault && end[-1] == value) 550 end--; 551 552 slab_bug(s, "%s overwritten", what); 553 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n", 554 fault, end - 1, fault[0], value); 555 print_trailer(s, page, object); 556 557 restore_bytes(s, what, value, fault, end); 558 return 0; 559 } 560 561 /* 562 * Object layout: 563 * 564 * object address 565 * Bytes of the object to be managed. 566 * If the freepointer may overlay the object then the free 567 * pointer is the first word of the object. 568 * 569 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 570 * 0xa5 (POISON_END) 571 * 572 * object + s->objsize 573 * Padding to reach word boundary. This is also used for Redzoning. 574 * Padding is extended by another word if Redzoning is enabled and 575 * objsize == inuse. 
 *
 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
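
/*
 * Illustrative example (not from the original source; assumes a 64-bit
 * build, no constructor): a cache with objsize 40 and
 * SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER lays out each object as
 *
 *	  0.. 39	payload, poisoned with 0x6b and a trailing 0xa5
 *			while the object is free
 *	 40.. 47	redzone (0xbb inactive / 0xcc active)
 *	 48.. 55	free pointer (s->offset == s->inuse == 48)
 *	 56..103	two struct track records (alloc, free)
 *	104..111	padding filled with 0x5a (POISON_INUSE)
 *
 * for a total s->size of 112. See calculate_sizes() for the exact rules.
 */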

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	if (s->size == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
				p + off, POISON_INUSE, s->size - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page));
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section("Padding", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, int active)
{
	u8 *p = object;
	u8 *endobject = object + s->objsize;

	if (s->flags & SLAB_RED_ZONE) {
		unsigned int red =
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, red, s->inuse - s->objsize))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE, s->inuse - s->objsize);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (!active && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->objsize - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->objsize - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && active)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp = page->freelist;
	void *object = NULL;
	unsigned long max_objects;

	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but "
			"should be %d", page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but "
			"counted were %d", page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object", (void *)object, s->objsize);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
803 */ 804 static void add_full(struct kmem_cache_node *n, struct page *page) 805 { 806 spin_lock(&n->list_lock); 807 list_add(&page->lru, &n->full); 808 spin_unlock(&n->list_lock); 809 } 810 811 static void remove_full(struct kmem_cache *s, struct page *page) 812 { 813 struct kmem_cache_node *n; 814 815 if (!(s->flags & SLAB_STORE_USER)) 816 return; 817 818 n = get_node(s, page_to_nid(page)); 819 820 spin_lock(&n->list_lock); 821 list_del(&page->lru); 822 spin_unlock(&n->list_lock); 823 } 824 825 /* Tracking of the number of slabs for debugging purposes */ 826 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 827 { 828 struct kmem_cache_node *n = get_node(s, node); 829 830 return atomic_long_read(&n->nr_slabs); 831 } 832 833 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 834 { 835 struct kmem_cache_node *n = get_node(s, node); 836 837 /* 838 * May be called early in order to allocate a slab for the 839 * kmem_cache_node structure. Solve the chicken-egg 840 * dilemma by deferring the increment of the count during 841 * bootstrap (see early_kmem_cache_node_alloc). 842 */ 843 if (!NUMA_BUILD || n) { 844 atomic_long_inc(&n->nr_slabs); 845 atomic_long_add(objects, &n->total_objects); 846 } 847 } 848 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 849 { 850 struct kmem_cache_node *n = get_node(s, node); 851 852 atomic_long_dec(&n->nr_slabs); 853 atomic_long_sub(objects, &n->total_objects); 854 } 855 856 /* Object debug checks for alloc/free paths */ 857 static void setup_object_debug(struct kmem_cache *s, struct page *page, 858 void *object) 859 { 860 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 861 return; 862 863 init_object(s, object, 0); 864 init_tracking(s, object); 865 } 866 867 static int alloc_debug_processing(struct kmem_cache *s, struct page *page, 868 void *object, unsigned long addr) 869 { 870 if (!check_slab(s, page)) 871 goto bad; 872 873 if (!on_freelist(s, page, object)) { 874 object_err(s, page, object, "Object already allocated"); 875 goto bad; 876 } 877 878 if (!check_valid_pointer(s, page, object)) { 879 object_err(s, page, object, "Freelist Pointer check fails"); 880 goto bad; 881 } 882 883 if (!check_object(s, page, object, 0)) 884 goto bad; 885 886 /* Success perform special debug activities for allocs */ 887 if (s->flags & SLAB_STORE_USER) 888 set_track(s, object, TRACK_ALLOC, addr); 889 trace(s, page, object, 1); 890 init_object(s, object, 1); 891 return 1; 892 893 bad: 894 if (PageSlab(page)) { 895 /* 896 * If this is a slab page then lets do the best we can 897 * to avoid issues in the future. Marking all objects 898 * as used avoids touching the remaining objects. 
899 */ 900 slab_fix(s, "Marking all objects used"); 901 page->inuse = page->objects; 902 page->freelist = NULL; 903 } 904 return 0; 905 } 906 907 static int free_debug_processing(struct kmem_cache *s, struct page *page, 908 void *object, unsigned long addr) 909 { 910 if (!check_slab(s, page)) 911 goto fail; 912 913 if (!check_valid_pointer(s, page, object)) { 914 slab_err(s, page, "Invalid object pointer 0x%p", object); 915 goto fail; 916 } 917 918 if (on_freelist(s, page, object)) { 919 object_err(s, page, object, "Object already free"); 920 goto fail; 921 } 922 923 if (!check_object(s, page, object, 1)) 924 return 0; 925 926 if (unlikely(s != page->slab)) { 927 if (!PageSlab(page)) { 928 slab_err(s, page, "Attempt to free object(0x%p) " 929 "outside of slab", object); 930 } else if (!page->slab) { 931 printk(KERN_ERR 932 "SLUB <none>: no slab for object 0x%p.\n", 933 object); 934 dump_stack(); 935 } else 936 object_err(s, page, object, 937 "page slab pointer corrupt."); 938 goto fail; 939 } 940 941 /* Special debug activities for freeing objects */ 942 if (!PageSlubFrozen(page) && !page->freelist) 943 remove_full(s, page); 944 if (s->flags & SLAB_STORE_USER) 945 set_track(s, object, TRACK_FREE, addr); 946 trace(s, page, object, 0); 947 init_object(s, object, 0); 948 return 1; 949 950 fail: 951 slab_fix(s, "Object at 0x%p not freed", object); 952 return 0; 953 } 954 955 static int __init setup_slub_debug(char *str) 956 { 957 slub_debug = DEBUG_DEFAULT_FLAGS; 958 if (*str++ != '=' || !*str) 959 /* 960 * No options specified. Switch on full debugging. 961 */ 962 goto out; 963 964 if (*str == ',') 965 /* 966 * No options but restriction on slabs. This means full 967 * debugging for slabs matching a pattern. 968 */ 969 goto check_slabs; 970 971 slub_debug = 0; 972 if (*str == '-') 973 /* 974 * Switch off all debugging measures. 975 */ 976 goto out; 977 978 /* 979 * Determine which debug features should be switched on 980 */ 981 for (; *str && *str != ','; str++) { 982 switch (tolower(*str)) { 983 case 'f': 984 slub_debug |= SLAB_DEBUG_FREE; 985 break; 986 case 'z': 987 slub_debug |= SLAB_RED_ZONE; 988 break; 989 case 'p': 990 slub_debug |= SLAB_POISON; 991 break; 992 case 'u': 993 slub_debug |= SLAB_STORE_USER; 994 break; 995 case 't': 996 slub_debug |= SLAB_TRACE; 997 break; 998 default: 999 printk(KERN_ERR "slub_debug option '%c' " 1000 "unknown. skipped\n", *str); 1001 } 1002 } 1003 1004 check_slabs: 1005 if (*str == ',') 1006 slub_debug_slabs = str + 1; 1007 out: 1008 return 1; 1009 } 1010 1011 __setup("slub_debug", setup_slub_debug); 1012 1013 static unsigned long kmem_cache_flags(unsigned long objsize, 1014 unsigned long flags, const char *name, 1015 void (*ctor)(void *)) 1016 { 1017 /* 1018 * Enable debugging if selected on the kernel commandline. 
1019 */ 1020 if (slub_debug && (!slub_debug_slabs || 1021 strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0)) 1022 flags |= slub_debug; 1023 1024 return flags; 1025 } 1026 #else 1027 static inline void setup_object_debug(struct kmem_cache *s, 1028 struct page *page, void *object) {} 1029 1030 static inline int alloc_debug_processing(struct kmem_cache *s, 1031 struct page *page, void *object, unsigned long addr) { return 0; } 1032 1033 static inline int free_debug_processing(struct kmem_cache *s, 1034 struct page *page, void *object, unsigned long addr) { return 0; } 1035 1036 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1037 { return 1; } 1038 static inline int check_object(struct kmem_cache *s, struct page *page, 1039 void *object, int active) { return 1; } 1040 static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 1041 static inline unsigned long kmem_cache_flags(unsigned long objsize, 1042 unsigned long flags, const char *name, 1043 void (*ctor)(void *)) 1044 { 1045 return flags; 1046 } 1047 #define slub_debug 0 1048 1049 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1050 { return 0; } 1051 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1052 int objects) {} 1053 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1054 int objects) {} 1055 #endif 1056 1057 /* 1058 * Slab allocation and freeing 1059 */ 1060 static inline struct page *alloc_slab_page(gfp_t flags, int node, 1061 struct kmem_cache_order_objects oo) 1062 { 1063 int order = oo_order(oo); 1064 1065 if (node == -1) 1066 return alloc_pages(flags, order); 1067 else 1068 return alloc_pages_node(node, flags, order); 1069 } 1070 1071 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1072 { 1073 struct page *page; 1074 struct kmem_cache_order_objects oo = s->oo; 1075 1076 flags |= s->allocflags; 1077 1078 page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node, 1079 oo); 1080 if (unlikely(!page)) { 1081 oo = s->min; 1082 /* 1083 * Allocation may have failed due to fragmentation. 1084 * Try a lower order alloc if possible 1085 */ 1086 page = alloc_slab_page(flags, node, oo); 1087 if (!page) 1088 return NULL; 1089 1090 stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK); 1091 } 1092 page->objects = oo_objects(oo); 1093 mod_zone_page_state(page_zone(page), 1094 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1095 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1096 1 << oo_order(oo)); 1097 1098 return page; 1099 } 1100 1101 static void setup_object(struct kmem_cache *s, struct page *page, 1102 void *object) 1103 { 1104 setup_object_debug(s, page, object); 1105 if (unlikely(s->ctor)) 1106 s->ctor(object); 1107 } 1108 1109 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1110 { 1111 struct page *page; 1112 void *start; 1113 void *last; 1114 void *p; 1115 1116 BUG_ON(flags & GFP_SLAB_BUG_MASK); 1117 1118 page = allocate_slab(s, 1119 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1120 if (!page) 1121 goto out; 1122 1123 inc_slabs_node(s, page_to_nid(page), page->objects); 1124 page->slab = s; 1125 page->flags |= 1 << PG_slab; 1126 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | 1127 SLAB_STORE_USER | SLAB_TRACE)) 1128 __SetPageSlubDebug(page); 1129 1130 start = page_address(page); 1131 1132 if (unlikely(s->flags & SLAB_POISON)) 1133 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); 1134 1135 last = start; 1136 for_each_object(p, s, start, page->objects) { 1137 setup_object(s, page, last); 1138 set_freepointer(s, last, p); 1139 last = p; 1140 } 1141 setup_object(s, page, last); 1142 set_freepointer(s, last, NULL); 1143 1144 page->freelist = start; 1145 page->inuse = 0; 1146 out: 1147 return page; 1148 } 1149 1150 static void __free_slab(struct kmem_cache *s, struct page *page) 1151 { 1152 int order = compound_order(page); 1153 int pages = 1 << order; 1154 1155 if (unlikely(SLABDEBUG && PageSlubDebug(page))) { 1156 void *p; 1157 1158 slab_pad_check(s, page); 1159 for_each_object(p, s, page_address(page), 1160 page->objects) 1161 check_object(s, page, p, 0); 1162 __ClearPageSlubDebug(page); 1163 } 1164 1165 mod_zone_page_state(page_zone(page), 1166 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1167 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1168 -pages); 1169 1170 __ClearPageSlab(page); 1171 reset_page_mapcount(page); 1172 __free_pages(page, order); 1173 } 1174 1175 static void rcu_free_slab(struct rcu_head *h) 1176 { 1177 struct page *page; 1178 1179 page = container_of((struct list_head *)h, struct page, lru); 1180 __free_slab(page->slab, page); 1181 } 1182 1183 static void free_slab(struct kmem_cache *s, struct page *page) 1184 { 1185 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1186 /* 1187 * RCU free overloads the RCU head over the LRU 1188 */ 1189 struct rcu_head *head = (void *)&page->lru; 1190 1191 call_rcu(head, rcu_free_slab); 1192 } else 1193 __free_slab(s, page); 1194 } 1195 1196 static void discard_slab(struct kmem_cache *s, struct page *page) 1197 { 1198 dec_slabs_node(s, page_to_nid(page), page->objects); 1199 free_slab(s, page); 1200 } 1201 1202 /* 1203 * Per slab locking using the pagelock 1204 */ 1205 static __always_inline void slab_lock(struct page *page) 1206 { 1207 bit_spin_lock(PG_locked, &page->flags); 1208 } 1209 1210 static __always_inline void slab_unlock(struct page *page) 1211 { 1212 __bit_spin_unlock(PG_locked, &page->flags); 1213 } 1214 1215 static __always_inline int slab_trylock(struct page *page) 1216 { 1217 int rc = 1; 1218 1219 rc = bit_spin_trylock(PG_locked, &page->flags); 1220 return rc; 1221 } 1222 1223 /* 1224 * Management of partially allocated slabs 1225 */ 1226 static void add_partial(struct kmem_cache_node *n, 1227 struct page *page, int tail) 1228 { 1229 spin_lock(&n->list_lock); 1230 n->nr_partial++; 1231 if (tail) 1232 list_add_tail(&page->lru, &n->partial); 1233 else 1234 list_add(&page->lru, &n->partial); 1235 spin_unlock(&n->list_lock); 1236 } 1237 1238 static void remove_partial(struct kmem_cache *s, struct page *page) 1239 { 1240 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1241 1242 spin_lock(&n->list_lock); 1243 list_del(&page->lru); 1244 n->nr_partial--; 1245 spin_unlock(&n->list_lock); 1246 } 1247 1248 /* 1249 * Lock slab and remove from the partial list. 1250 * 1251 * Must hold list_lock. 1252 */ 1253 static inline int lock_and_freeze_slab(struct kmem_cache_node *n, 1254 struct page *page) 1255 { 1256 if (slab_trylock(page)) { 1257 list_del(&page->lru); 1258 n->nr_partial--; 1259 __SetPageSlubFrozen(page); 1260 return 1; 1261 } 1262 return 0; 1263 } 1264 1265 /* 1266 * Try to allocate a partial slab from a specific node. 1267 */ 1268 static struct page *get_partial_node(struct kmem_cache_node *n) 1269 { 1270 struct page *page; 1271 1272 /* 1273 * Racy check. If we mistakenly see no partial slabs then we 1274 * just allocate an empty slab. If we mistakenly try to get a 1275 * partial slab and there is none available then get_partials() 1276 * will return NULL. 1277 */ 1278 if (!n || !n->nr_partial) 1279 return NULL; 1280 1281 spin_lock(&n->list_lock); 1282 list_for_each_entry(page, &n->partial, lru) 1283 if (lock_and_freeze_slab(n, page)) 1284 goto out; 1285 page = NULL; 1286 out: 1287 spin_unlock(&n->list_lock); 1288 return page; 1289 } 1290 1291 /* 1292 * Get a page from somewhere. Search in increasing NUMA distances. 
1293 */ 1294 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) 1295 { 1296 #ifdef CONFIG_NUMA 1297 struct zonelist *zonelist; 1298 struct zoneref *z; 1299 struct zone *zone; 1300 enum zone_type high_zoneidx = gfp_zone(flags); 1301 struct page *page; 1302 1303 /* 1304 * The defrag ratio allows a configuration of the tradeoffs between 1305 * inter node defragmentation and node local allocations. A lower 1306 * defrag_ratio increases the tendency to do local allocations 1307 * instead of attempting to obtain partial slabs from other nodes. 1308 * 1309 * If the defrag_ratio is set to 0 then kmalloc() always 1310 * returns node local objects. If the ratio is higher then kmalloc() 1311 * may return off node objects because partial slabs are obtained 1312 * from other nodes and filled up. 1313 * 1314 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes 1315 * defrag_ratio = 1000) then every (well almost) allocation will 1316 * first attempt to defrag slab caches on other nodes. This means 1317 * scanning over all nodes to look for partial slabs which may be 1318 * expensive if we do it every time we are trying to find a slab 1319 * with available objects. 1320 */ 1321 if (!s->remote_node_defrag_ratio || 1322 get_cycles() % 1024 > s->remote_node_defrag_ratio) 1323 return NULL; 1324 1325 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 1326 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1327 struct kmem_cache_node *n; 1328 1329 n = get_node(s, zone_to_nid(zone)); 1330 1331 if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1332 n->nr_partial > s->min_partial) { 1333 page = get_partial_node(n); 1334 if (page) 1335 return page; 1336 } 1337 } 1338 #endif 1339 return NULL; 1340 } 1341 1342 /* 1343 * Get a partial page, lock it and return it. 1344 */ 1345 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) 1346 { 1347 struct page *page; 1348 int searchnode = (node == -1) ? numa_node_id() : node; 1349 1350 page = get_partial_node(get_node(s, searchnode)); 1351 if (page || (flags & __GFP_THISNODE)) 1352 return page; 1353 1354 return get_any_partial(s, flags); 1355 } 1356 1357 /* 1358 * Move a page back to the lists. 1359 * 1360 * Must be called with the slab lock held. 1361 * 1362 * On exit the slab lock will have been dropped. 1363 */ 1364 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) 1365 { 1366 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1367 struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id()); 1368 1369 __ClearPageSlubFrozen(page); 1370 if (page->inuse) { 1371 1372 if (page->freelist) { 1373 add_partial(n, page, tail); 1374 stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); 1375 } else { 1376 stat(c, DEACTIVATE_FULL); 1377 if (SLABDEBUG && PageSlubDebug(page) && 1378 (s->flags & SLAB_STORE_USER)) 1379 add_full(n, page); 1380 } 1381 slab_unlock(page); 1382 } else { 1383 stat(c, DEACTIVATE_EMPTY); 1384 if (n->nr_partial < s->min_partial) { 1385 /* 1386 * Adding an empty slab to the partial slabs in order 1387 * to avoid page allocator overhead. This slab needs 1388 * to come after the other slabs with objects in 1389 * so that the others get filled first. That way the 1390 * size of the partial list stays small. 1391 * 1392 * kmem_cache_shrink can reclaim any empty slabs from 1393 * the partial list. 
1394 */ 1395 add_partial(n, page, 1); 1396 slab_unlock(page); 1397 } else { 1398 slab_unlock(page); 1399 stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB); 1400 discard_slab(s, page); 1401 } 1402 } 1403 } 1404 1405 /* 1406 * Remove the cpu slab 1407 */ 1408 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1409 { 1410 struct page *page = c->page; 1411 int tail = 1; 1412 1413 if (page->freelist) 1414 stat(c, DEACTIVATE_REMOTE_FREES); 1415 /* 1416 * Merge cpu freelist into slab freelist. Typically we get here 1417 * because both freelists are empty. So this is unlikely 1418 * to occur. 1419 */ 1420 while (unlikely(c->freelist)) { 1421 void **object; 1422 1423 tail = 0; /* Hot objects. Put the slab first */ 1424 1425 /* Retrieve object from cpu_freelist */ 1426 object = c->freelist; 1427 c->freelist = c->freelist[c->offset]; 1428 1429 /* And put onto the regular freelist */ 1430 object[c->offset] = page->freelist; 1431 page->freelist = object; 1432 page->inuse--; 1433 } 1434 c->page = NULL; 1435 unfreeze_slab(s, page, tail); 1436 } 1437 1438 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1439 { 1440 stat(c, CPUSLAB_FLUSH); 1441 slab_lock(c->page); 1442 deactivate_slab(s, c); 1443 } 1444 1445 /* 1446 * Flush cpu slab. 1447 * 1448 * Called from IPI handler with interrupts disabled. 1449 */ 1450 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 1451 { 1452 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 1453 1454 if (likely(c && c->page)) 1455 flush_slab(s, c); 1456 } 1457 1458 static void flush_cpu_slab(void *d) 1459 { 1460 struct kmem_cache *s = d; 1461 1462 __flush_cpu_slab(s, smp_processor_id()); 1463 } 1464 1465 static void flush_all(struct kmem_cache *s) 1466 { 1467 on_each_cpu(flush_cpu_slab, s, 1); 1468 } 1469 1470 /* 1471 * Check if the objects in a per cpu structure fit numa 1472 * locality expectations. 1473 */ 1474 static inline int node_match(struct kmem_cache_cpu *c, int node) 1475 { 1476 #ifdef CONFIG_NUMA 1477 if (node != -1 && c->node != node) 1478 return 0; 1479 #endif 1480 return 1; 1481 } 1482 1483 /* 1484 * Slow path. The lockless freelist is empty or we need to perform 1485 * debugging duties. 1486 * 1487 * Interrupts are disabled. 1488 * 1489 * Processing is still very fast if new objects have been freed to the 1490 * regular freelist. In that case we simply take over the regular freelist 1491 * as the lockless freelist and zap the regular freelist. 1492 * 1493 * If that is not working then we fall back to the partial lists. We take the 1494 * first element of the freelist as the object to allocate now and move the 1495 * rest of the freelist to the lockless freelist. 1496 * 1497 * And if we were unable to get a new slab from the partial slab lists then 1498 * we need to allocate a new slab. This is the slowest path since it involves 1499 * a call to the page allocator and the setup of a new slab. 
 */
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			  unsigned long addr, struct kmem_cache_cpu *c)
{
	void **object;
	struct page *new;

	/* We handle __GFP_ZERO in the caller */
	gfpflags &= ~__GFP_ZERO;

	if (!c->page)
		goto new_slab;

	slab_lock(c->page);
	if (unlikely(!node_match(c, node)))
		goto another_slab;

	stat(c, ALLOC_REFILL);

load_freelist:
	object = c->page->freelist;
	if (unlikely(!object))
		goto another_slab;
	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
		goto debug;

	c->freelist = object[c->offset];
	c->page->inuse = c->page->objects;
	c->page->freelist = NULL;
	c->node = page_to_nid(c->page);
unlock_out:
	slab_unlock(c->page);
	stat(c, ALLOC_SLOWPATH);
	return object;

another_slab:
	deactivate_slab(s, c);

new_slab:
	new = get_partial(s, gfpflags, node);
	if (new) {
		c->page = new;
		stat(c, ALLOC_FROM_PARTIAL);
		goto load_freelist;
	}

	if (gfpflags & __GFP_WAIT)
		local_irq_enable();

	new = new_slab(s, gfpflags, node);

	if (gfpflags & __GFP_WAIT)
		local_irq_disable();

	if (new) {
		c = get_cpu_slab(s, smp_processor_id());
		stat(c, ALLOC_SLAB);
		if (c->page)
			flush_slab(s, c);
		slab_lock(new);
		__SetPageSlubFrozen(new);
		c->page = new;
		goto load_freelist;
	}
	return NULL;
debug:
	if (!alloc_debug_processing(s, c->page, object, addr))
		goto another_slab;

	c->page->inuse++;
	c->page->freelist = object[c->offset];
	c->node = -1;
	goto unlock_out;
}

/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr)
{
	void **object;
	struct kmem_cache_cpu *c;
	unsigned long flags;
	unsigned int objsize;

	might_sleep_if(gfpflags & __GFP_WAIT);

	if (should_failslab(s->objsize, gfpflags))
		return NULL;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	objsize = c->objsize;
	if (unlikely(!c->freelist || !node_match(c, node)))

		object = __slab_alloc(s, gfpflags, node, addr, c);

	else {
		object = c->freelist;
		c->freelist = object[c->offset];
		stat(c, ALLOC_FASTPATH);
	}
	local_irq_restore(flags);

	if (unlikely((gfpflags & __GFP_ZERO) && object))
		memset(object, 0, objsize);

	return object;
}

void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	return slab_alloc(s, gfpflags, -1, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	return slab_alloc(s, gfpflags, node, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
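
/*
 * Illustrative usage (not part of SLUB itself; the "foo" names are made
 * up): a typical user creates a cache once and then allocates and frees
 * objects through the fastpaths above, e.g.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *					SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 */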

/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
 */
static void __slab_free(struct kmem_cache *s, struct page *page,
			void *x, unsigned long addr, unsigned int offset)
{
	void *prior;
	void **object = (void *)x;
	struct kmem_cache_cpu *c;

	c = get_cpu_slab(s, raw_smp_processor_id());
	stat(c, FREE_SLOWPATH);
	slab_lock(page);

	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
		goto debug;

checks_ok:
	prior = object[offset] = page->freelist;
	page->freelist = object;
	page->inuse--;

	if (unlikely(PageSlubFrozen(page))) {
		stat(c, FREE_FROZEN);
		goto out_unlock;
	}

	if (unlikely(!page->inuse))
		goto slab_empty;

	/*
	 * Objects left in the slab. If it was not on the partial list before
	 * then add it.
	 */
	if (unlikely(!prior)) {
		add_partial(get_node(s, page_to_nid(page)), page, 1);
		stat(c, FREE_ADD_PARTIAL);
	}

out_unlock:
	slab_unlock(page);
	return;

slab_empty:
	if (prior) {
		/*
		 * Slab still on the partial list.
		 */
		remove_partial(s, page);
		stat(c, FREE_REMOVE_PARTIAL);
	}
	slab_unlock(page);
	stat(c, FREE_SLAB);
	discard_slab(s, page);
	return;

debug:
	if (!free_debug_processing(s, page, x, addr))
		goto out_unlock;
	goto checks_ok;
}

/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
static __always_inline void slab_free(struct kmem_cache *s,
			struct page *page, void *x, unsigned long addr)
{
	void **object = (void *)x;
	struct kmem_cache_cpu *c;
	unsigned long flags;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	debug_check_no_locks_freed(object, c->objsize);
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(object, c->objsize);
	if (likely(page == c->page && c->node >= 0)) {
		object[c->offset] = c->freelist;
		c->freelist = object;
		stat(c, FREE_FASTPATH);
	} else
		__slab_free(s, page, x, addr, c->offset);

	local_irq_restore(flags);
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
	struct page *page;

	page = virt_to_head_page(x);

	slab_free(s, page, x, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_free);

/* Figure out on which slab page the object resides */
static struct page *get_object_page(const void *x)
{
	struct page *page = virt_to_head_page(x);

	if (!PageSlab(page))
		return NULL;

	return page;
}
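
/*
 * Note (illustrative, not from the original source): because the free
 * fastpath above stores the old c->freelist into the word at c->offset
 * inside the object being freed and then makes that object the new head,
 * the cpu freelist is simply a singly linked list threaded through the
 * free objects themselves; no separate queue memory is needed.
 */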

/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor has always one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static int slub_min_objects;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slub_nomerge;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less a concern for large slabs though which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline int slab_order(int size, int min_objects,
				int max_order, int fract_leftover)
{
	int order;
	int rem;
	int min_order = slub_min_order;

	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
		return get_order(size * MAX_OBJS_PER_PAGE) - 1;

	for (order = max(min_order,
				fls(min_objects * size - 1) - PAGE_SHIFT);
			order <= max_order; order++) {

		unsigned long slab_size = PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;

		rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			break;

	}

	return order;
}

static inline int calculate_order(int size)
{
	int order;
	int min_objects;
	int fraction;
	int max_objects;

	/*
	 * Attempt to find best configuration for a slab. This
	 * works by first attempting to generate a layout with
	 * the best configuration and backing off gradually.
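	 *
	 * For example (illustrative, 4 KiB pages): for a 700 byte object
	 * with a minimum of 16 objects per slab, slab_order() settles on
	 * order 2; the 16 KiB slab then holds 23 objects and wastes
	 * 284 bytes, well under 1/16th of the slab.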
1847 * 1848 * First we reduce the acceptable waste in a slab. Then 1849 * we reduce the minimum objects required in a slab. 1850 */ 1851 min_objects = slub_min_objects; 1852 if (!min_objects) 1853 min_objects = 4 * (fls(nr_cpu_ids) + 1); 1854 max_objects = (PAGE_SIZE << slub_max_order)/size; 1855 min_objects = min(min_objects, max_objects); 1856 1857 while (min_objects > 1) { 1858 fraction = 16; 1859 while (fraction >= 4) { 1860 order = slab_order(size, min_objects, 1861 slub_max_order, fraction); 1862 if (order <= slub_max_order) 1863 return order; 1864 fraction /= 2; 1865 } 1866 min_objects --; 1867 } 1868 1869 /* 1870 * We were unable to place multiple objects in a slab. Now 1871 * lets see if we can place a single object there. 1872 */ 1873 order = slab_order(size, 1, slub_max_order, 1); 1874 if (order <= slub_max_order) 1875 return order; 1876 1877 /* 1878 * Doh this slab cannot be placed using slub_max_order. 1879 */ 1880 order = slab_order(size, 1, MAX_ORDER, 1); 1881 if (order <= MAX_ORDER) 1882 return order; 1883 return -ENOSYS; 1884 } 1885 1886 /* 1887 * Figure out what the alignment of the objects will be. 1888 */ 1889 static unsigned long calculate_alignment(unsigned long flags, 1890 unsigned long align, unsigned long size) 1891 { 1892 /* 1893 * If the user wants hardware cache aligned objects then follow that 1894 * suggestion if the object is sufficiently large. 1895 * 1896 * The hardware cache alignment cannot override the specified 1897 * alignment though. If that is greater then use it. 1898 */ 1899 if (flags & SLAB_HWCACHE_ALIGN) { 1900 unsigned long ralign = cache_line_size(); 1901 while (size <= ralign / 2) 1902 ralign /= 2; 1903 align = max(align, ralign); 1904 } 1905 1906 if (align < ARCH_SLAB_MINALIGN) 1907 align = ARCH_SLAB_MINALIGN; 1908 1909 return ALIGN(align, sizeof(void *)); 1910 } 1911 1912 static void init_kmem_cache_cpu(struct kmem_cache *s, 1913 struct kmem_cache_cpu *c) 1914 { 1915 c->page = NULL; 1916 c->freelist = NULL; 1917 c->node = 0; 1918 c->offset = s->offset / sizeof(void *); 1919 c->objsize = s->objsize; 1920 #ifdef CONFIG_SLUB_STATS 1921 memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned)); 1922 #endif 1923 } 1924 1925 static void 1926 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) 1927 { 1928 n->nr_partial = 0; 1929 spin_lock_init(&n->list_lock); 1930 INIT_LIST_HEAD(&n->partial); 1931 #ifdef CONFIG_SLUB_DEBUG 1932 atomic_long_set(&n->nr_slabs, 0); 1933 atomic_long_set(&n->total_objects, 0); 1934 INIT_LIST_HEAD(&n->full); 1935 #endif 1936 } 1937 1938 #ifdef CONFIG_SMP 1939 /* 1940 * Per cpu array for per cpu structures. 1941 * 1942 * The per cpu array places all kmem_cache_cpu structures from one processor 1943 * close together meaning that it becomes possible that multiple per cpu 1944 * structures are contained in one cacheline. This may be particularly 1945 * beneficial for the kmalloc caches. 1946 * 1947 * A desktop system typically has around 60-80 slabs. With 100 here we are 1948 * likely able to get per cpu structures for all caches from the array defined 1949 * here. We must be able to cover all kmalloc caches during bootstrap. 1950 * 1951 * If the per cpu array is exhausted then fall back to kmalloc 1952 * of individual cachelines. No sharing is possible then. 
1953 */ 1954 #define NR_KMEM_CACHE_CPU 100 1955 1956 static DEFINE_PER_CPU(struct kmem_cache_cpu, 1957 kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; 1958 1959 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); 1960 static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS); 1961 1962 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, 1963 int cpu, gfp_t flags) 1964 { 1965 struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu); 1966 1967 if (c) 1968 per_cpu(kmem_cache_cpu_free, cpu) = 1969 (void *)c->freelist; 1970 else { 1971 /* Table overflow: So allocate ourselves */ 1972 c = kmalloc_node( 1973 ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()), 1974 flags, cpu_to_node(cpu)); 1975 if (!c) 1976 return NULL; 1977 } 1978 1979 init_kmem_cache_cpu(s, c); 1980 return c; 1981 } 1982 1983 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu) 1984 { 1985 if (c < per_cpu(kmem_cache_cpu, cpu) || 1986 c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { 1987 kfree(c); 1988 return; 1989 } 1990 c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu); 1991 per_cpu(kmem_cache_cpu_free, cpu) = c; 1992 } 1993 1994 static void free_kmem_cache_cpus(struct kmem_cache *s) 1995 { 1996 int cpu; 1997 1998 for_each_online_cpu(cpu) { 1999 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 2000 2001 if (c) { 2002 s->cpu_slab[cpu] = NULL; 2003 free_kmem_cache_cpu(c, cpu); 2004 } 2005 } 2006 } 2007 2008 static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) 2009 { 2010 int cpu; 2011 2012 for_each_online_cpu(cpu) { 2013 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 2014 2015 if (c) 2016 continue; 2017 2018 c = alloc_kmem_cache_cpu(s, cpu, flags); 2019 if (!c) { 2020 free_kmem_cache_cpus(s); 2021 return 0; 2022 } 2023 s->cpu_slab[cpu] = c; 2024 } 2025 return 1; 2026 } 2027 2028 /* 2029 * Initialize the per cpu array. 2030 */ 2031 static void init_alloc_cpu_cpu(int cpu) 2032 { 2033 int i; 2034 2035 if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once))) 2036 return; 2037 2038 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) 2039 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); 2040 2041 cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)); 2042 } 2043 2044 static void __init init_alloc_cpu(void) 2045 { 2046 int cpu; 2047 2048 for_each_online_cpu(cpu) 2049 init_alloc_cpu_cpu(cpu); 2050 } 2051 2052 #else 2053 static inline void free_kmem_cache_cpus(struct kmem_cache *s) {} 2054 static inline void init_alloc_cpu(void) {} 2055 2056 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) 2057 { 2058 init_kmem_cache_cpu(s, &s->cpu_slab); 2059 return 1; 2060 } 2061 #endif 2062 2063 #ifdef CONFIG_NUMA 2064 /* 2065 * No kmalloc_node yet so do it by hand. We know that this is the first 2066 * slab on the node for this slabcache. There are no concurrent accesses 2067 * possible. 2068 * 2069 * Note that this function only works on the kmalloc_node_cache 2070 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2071 * memory on a fresh node that has no slab structures yet. 
2072 */ 2073 static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node) 2074 { 2075 struct page *page; 2076 struct kmem_cache_node *n; 2077 unsigned long flags; 2078 2079 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); 2080 2081 page = new_slab(kmalloc_caches, gfpflags, node); 2082 2083 BUG_ON(!page); 2084 if (page_to_nid(page) != node) { 2085 printk(KERN_ERR "SLUB: Unable to allocate memory from " 2086 "node %d\n", node); 2087 printk(KERN_ERR "SLUB: Allocating a useless per node structure " 2088 "in order to be able to continue\n"); 2089 } 2090 2091 n = page->freelist; 2092 BUG_ON(!n); 2093 page->freelist = get_freepointer(kmalloc_caches, n); 2094 page->inuse++; 2095 kmalloc_caches->node[node] = n; 2096 #ifdef CONFIG_SLUB_DEBUG 2097 init_object(kmalloc_caches, n, 1); 2098 init_tracking(kmalloc_caches, n); 2099 #endif 2100 init_kmem_cache_node(n, kmalloc_caches); 2101 inc_slabs_node(kmalloc_caches, node, page->objects); 2102 2103 /* 2104 * lockdep requires consistent irq usage for each lock 2105 * so even though there cannot be a race this early in 2106 * the boot sequence, we still disable irqs. 2107 */ 2108 local_irq_save(flags); 2109 add_partial(n, page, 0); 2110 local_irq_restore(flags); 2111 } 2112 2113 static void free_kmem_cache_nodes(struct kmem_cache *s) 2114 { 2115 int node; 2116 2117 for_each_node_state(node, N_NORMAL_MEMORY) { 2118 struct kmem_cache_node *n = s->node[node]; 2119 if (n && n != &s->local_node) 2120 kmem_cache_free(kmalloc_caches, n); 2121 s->node[node] = NULL; 2122 } 2123 } 2124 2125 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2126 { 2127 int node; 2128 int local_node; 2129 2130 if (slab_state >= UP) 2131 local_node = page_to_nid(virt_to_page(s)); 2132 else 2133 local_node = 0; 2134 2135 for_each_node_state(node, N_NORMAL_MEMORY) { 2136 struct kmem_cache_node *n; 2137 2138 if (local_node == node) 2139 n = &s->local_node; 2140 else { 2141 if (slab_state == DOWN) { 2142 early_kmem_cache_node_alloc(gfpflags, node); 2143 continue; 2144 } 2145 n = kmem_cache_alloc_node(kmalloc_caches, 2146 gfpflags, node); 2147 2148 if (!n) { 2149 free_kmem_cache_nodes(s); 2150 return 0; 2151 } 2152 2153 } 2154 s->node[node] = n; 2155 init_kmem_cache_node(n, s); 2156 } 2157 return 1; 2158 } 2159 #else 2160 static void free_kmem_cache_nodes(struct kmem_cache *s) 2161 { 2162 } 2163 2164 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2165 { 2166 init_kmem_cache_node(&s->local_node, s); 2167 return 1; 2168 } 2169 #endif 2170 2171 static void set_min_partial(struct kmem_cache *s, unsigned long min) 2172 { 2173 if (min < MIN_PARTIAL) 2174 min = MIN_PARTIAL; 2175 else if (min > MAX_PARTIAL) 2176 min = MAX_PARTIAL; 2177 s->min_partial = min; 2178 } 2179 2180 /* 2181 * calculate_sizes() determines the order and the distribution of data within 2182 * a slab object. 2183 */ 2184 static int calculate_sizes(struct kmem_cache *s, int forced_order) 2185 { 2186 unsigned long flags = s->flags; 2187 unsigned long size = s->objsize; 2188 unsigned long align = s->align; 2189 int order; 2190 2191 /* 2192 * Round up object size to the next word boundary. We can only 2193 * place the free pointer at word boundaries and this determines 2194 * the possible location of the free pointer. 2195 */ 2196 size = ALIGN(size, sizeof(void *)); 2197 2198 #ifdef CONFIG_SLUB_DEBUG 2199 /* 2200 * Determine if we can poison the object itself. 
If the user of 2201 * the slab may touch the object after free or before allocation 2202 * then we should never poison the object itself. 2203 */ 2204 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 2205 !s->ctor) 2206 s->flags |= __OBJECT_POISON; 2207 else 2208 s->flags &= ~__OBJECT_POISON; 2209 2210 2211 /* 2212 * If we are Redzoning then check if there is some space between the 2213 * end of the object and the free pointer. If not then add an 2214 * additional word to have some bytes to store Redzone information. 2215 */ 2216 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 2217 size += sizeof(void *); 2218 #endif 2219 2220 /* 2221 * With that we have determined the number of bytes in actual use 2222 * by the object. This is the potential offset to the free pointer. 2223 */ 2224 s->inuse = size; 2225 2226 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 2227 s->ctor)) { 2228 /* 2229 * Relocate free pointer after the object if it is not 2230 * permitted to overwrite the first word of the object on 2231 * kmem_cache_free. 2232 * 2233 * This is the case if we do RCU, have a constructor or 2234 * destructor or are poisoning the objects. 2235 */ 2236 s->offset = size; 2237 size += sizeof(void *); 2238 } 2239 2240 #ifdef CONFIG_SLUB_DEBUG 2241 if (flags & SLAB_STORE_USER) 2242 /* 2243 * Need to store information about allocs and frees after 2244 * the object. 2245 */ 2246 size += 2 * sizeof(struct track); 2247 2248 if (flags & SLAB_RED_ZONE) 2249 /* 2250 * Add some empty padding so that we can catch 2251 * overwrites from earlier objects rather than let 2252 * tracking information or the free pointer be 2253 * corrupted if a user writes before the start 2254 * of the object. 2255 */ 2256 size += sizeof(void *); 2257 #endif 2258 2259 /* 2260 * Determine the alignment based on various parameters that the 2261 * user specified and the dynamic determination of cache line size 2262 * on bootup. 2263 */ 2264 align = calculate_alignment(flags, align, s->objsize); 2265 2266 /* 2267 * SLUB stores one object immediately after another beginning from 2268 * offset 0. In order to align the objects we have to simply size 2269 * each object to conform to the alignment. 2270 */ 2271 size = ALIGN(size, align); 2272 s->size = size; 2273 if (forced_order >= 0) 2274 order = forced_order; 2275 else 2276 order = calculate_order(size); 2277 2278 if (order < 0) 2279 return 0; 2280 2281 s->allocflags = 0; 2282 if (order) 2283 s->allocflags |= __GFP_COMP; 2284 2285 if (s->flags & SLAB_CACHE_DMA) 2286 s->allocflags |= SLUB_DMA; 2287 2288 if (s->flags & SLAB_RECLAIM_ACCOUNT) 2289 s->allocflags |= __GFP_RECLAIMABLE; 2290 2291 /* 2292 * Determine the number of objects per slab 2293 */ 2294 s->oo = oo_make(order, size); 2295 s->min = oo_make(get_order(size), size); 2296 if (oo_objects(s->oo) > oo_objects(s->max)) 2297 s->max = s->oo; 2298 2299 return !!oo_objects(s->oo); 2300 2301 } 2302 2303 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 2304 const char *name, size_t size, 2305 size_t align, unsigned long flags, 2306 void (*ctor)(void *)) 2307 { 2308 memset(s, 0, kmem_size); 2309 s->name = name; 2310 s->ctor = ctor; 2311 s->objsize = size; 2312 s->align = align; 2313 s->flags = kmem_cache_flags(size, flags, name, ctor); 2314 2315 if (!calculate_sizes(s, -1)) 2316 goto error; 2317 2318 /* 2319 * The larger the object size is, the more pages we want on the partial 2320 * list to avoid pounding the page allocator excessively. 
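 *
 * For illustration: set_min_partial(s, ilog2(s->size)) below gives a
 * cache whose total object size works out to 32 bytes ilog2(32) = 5,
 * which is exactly MIN_PARTIAL, while a 4096 byte object size yields
 * ilog2(4096) = 12 and is clamped down to MAX_PARTIAL (10). Small
 * caches therefore keep only the minimum number of partial slabs while
 * large caches keep a few more.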
2321 */ 2322 set_min_partial(s, ilog2(s->size)); 2323 s->refcount = 1; 2324 #ifdef CONFIG_NUMA 2325 s->remote_node_defrag_ratio = 1000; 2326 #endif 2327 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) 2328 goto error; 2329 2330 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) 2331 return 1; 2332 free_kmem_cache_nodes(s); 2333 error: 2334 if (flags & SLAB_PANIC) 2335 panic("Cannot create slab %s size=%lu realsize=%u " 2336 "order=%u offset=%u flags=%lx\n", 2337 s->name, (unsigned long)size, s->size, oo_order(s->oo), 2338 s->offset, flags); 2339 return 0; 2340 } 2341 2342 /* 2343 * Check if a given pointer is valid 2344 */ 2345 int kmem_ptr_validate(struct kmem_cache *s, const void *object) 2346 { 2347 struct page *page; 2348 2349 page = get_object_page(object); 2350 2351 if (!page || s != page->slab) 2352 /* No slab or wrong slab */ 2353 return 0; 2354 2355 if (!check_valid_pointer(s, page, object)) 2356 return 0; 2357 2358 /* 2359 * We could also check if the object is on the slabs freelist. 2360 * But this would be too expensive and it seems that the main 2361 * purpose of kmem_ptr_valid() is to check if the object belongs 2362 * to a certain slab. 2363 */ 2364 return 1; 2365 } 2366 EXPORT_SYMBOL(kmem_ptr_validate); 2367 2368 /* 2369 * Determine the size of a slab object 2370 */ 2371 unsigned int kmem_cache_size(struct kmem_cache *s) 2372 { 2373 return s->objsize; 2374 } 2375 EXPORT_SYMBOL(kmem_cache_size); 2376 2377 const char *kmem_cache_name(struct kmem_cache *s) 2378 { 2379 return s->name; 2380 } 2381 EXPORT_SYMBOL(kmem_cache_name); 2382 2383 static void list_slab_objects(struct kmem_cache *s, struct page *page, 2384 const char *text) 2385 { 2386 #ifdef CONFIG_SLUB_DEBUG 2387 void *addr = page_address(page); 2388 void *p; 2389 DECLARE_BITMAP(map, page->objects); 2390 2391 bitmap_zero(map, page->objects); 2392 slab_err(s, page, "%s", text); 2393 slab_lock(page); 2394 for_each_free_object(p, s, page->freelist) 2395 set_bit(slab_index(p, s, addr), map); 2396 2397 for_each_object(p, s, addr, page->objects) { 2398 2399 if (!test_bit(slab_index(p, s, addr), map)) { 2400 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", 2401 p, p - addr); 2402 print_tracking(s, p); 2403 } 2404 } 2405 slab_unlock(page); 2406 #endif 2407 } 2408 2409 /* 2410 * Attempt to free all partial slabs on a node. 2411 */ 2412 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 2413 { 2414 unsigned long flags; 2415 struct page *page, *h; 2416 2417 spin_lock_irqsave(&n->list_lock, flags); 2418 list_for_each_entry_safe(page, h, &n->partial, lru) { 2419 if (!page->inuse) { 2420 list_del(&page->lru); 2421 discard_slab(s, page); 2422 n->nr_partial--; 2423 } else { 2424 list_slab_objects(s, page, 2425 "Objects remaining on kmem_cache_close()"); 2426 } 2427 } 2428 spin_unlock_irqrestore(&n->list_lock, flags); 2429 } 2430 2431 /* 2432 * Release all resources used by a slab cache. 
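 *
 * Rough order of the teardown performed below: flush the cpu slabs back
 * to the node lists, release the per cpu structures, then walk every
 * node freeing its empty partial slabs. If any node still holds slabs
 * with live objects afterwards the function returns 1 so that the
 * caller can warn; otherwise the per node structures are released as
 * well and 0 is returned.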
2433 */ 2434 static inline int kmem_cache_close(struct kmem_cache *s) 2435 { 2436 int node; 2437 2438 flush_all(s); 2439 2440 /* Attempt to free all objects */ 2441 free_kmem_cache_cpus(s); 2442 for_each_node_state(node, N_NORMAL_MEMORY) { 2443 struct kmem_cache_node *n = get_node(s, node); 2444 2445 free_partial(s, n); 2446 if (n->nr_partial || slabs_node(s, node)) 2447 return 1; 2448 } 2449 free_kmem_cache_nodes(s); 2450 return 0; 2451 } 2452 2453 /* 2454 * Close a cache and release the kmem_cache structure 2455 * (must be used for caches created using kmem_cache_create) 2456 */ 2457 void kmem_cache_destroy(struct kmem_cache *s) 2458 { 2459 down_write(&slub_lock); 2460 s->refcount--; 2461 if (!s->refcount) { 2462 list_del(&s->list); 2463 up_write(&slub_lock); 2464 if (kmem_cache_close(s)) { 2465 printk(KERN_ERR "SLUB %s: %s called for cache that " 2466 "still has objects.\n", s->name, __func__); 2467 dump_stack(); 2468 } 2469 sysfs_slab_remove(s); 2470 } else 2471 up_write(&slub_lock); 2472 } 2473 EXPORT_SYMBOL(kmem_cache_destroy); 2474 2475 /******************************************************************** 2476 * Kmalloc subsystem 2477 *******************************************************************/ 2478 2479 struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned; 2480 EXPORT_SYMBOL(kmalloc_caches); 2481 2482 static int __init setup_slub_min_order(char *str) 2483 { 2484 get_option(&str, &slub_min_order); 2485 2486 return 1; 2487 } 2488 2489 __setup("slub_min_order=", setup_slub_min_order); 2490 2491 static int __init setup_slub_max_order(char *str) 2492 { 2493 get_option(&str, &slub_max_order); 2494 2495 return 1; 2496 } 2497 2498 __setup("slub_max_order=", setup_slub_max_order); 2499 2500 static int __init setup_slub_min_objects(char *str) 2501 { 2502 get_option(&str, &slub_min_objects); 2503 2504 return 1; 2505 } 2506 2507 __setup("slub_min_objects=", setup_slub_min_objects); 2508 2509 static int __init setup_slub_nomerge(char *str) 2510 { 2511 slub_nomerge = 1; 2512 return 1; 2513 } 2514 2515 __setup("slub_nomerge", setup_slub_nomerge); 2516 2517 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, 2518 const char *name, int size, gfp_t gfp_flags) 2519 { 2520 unsigned int flags = 0; 2521 2522 if (gfp_flags & SLUB_DMA) 2523 flags = SLAB_CACHE_DMA; 2524 2525 down_write(&slub_lock); 2526 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2527 flags, NULL)) 2528 goto panic; 2529 2530 list_add(&s->list, &slab_caches); 2531 up_write(&slub_lock); 2532 if (sysfs_slab_add(s)) 2533 goto panic; 2534 return s; 2535 2536 panic: 2537 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2538 } 2539 2540 #ifdef CONFIG_ZONE_DMA 2541 static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT]; 2542 2543 static void sysfs_add_func(struct work_struct *w) 2544 { 2545 struct kmem_cache *s; 2546 2547 down_write(&slub_lock); 2548 list_for_each_entry(s, &slab_caches, list) { 2549 if (s->flags & __SYSFS_ADD_DEFERRED) { 2550 s->flags &= ~__SYSFS_ADD_DEFERRED; 2551 sysfs_slab_add(s); 2552 } 2553 } 2554 up_write(&slub_lock); 2555 } 2556 2557 static DECLARE_WORK(sysfs_add_work, sysfs_add_func); 2558 2559 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) 2560 { 2561 struct kmem_cache *s; 2562 char *text; 2563 size_t realsize; 2564 2565 s = kmalloc_caches_dma[index]; 2566 if (s) 2567 return s; 2568 2569 /* Dynamically create dma cache */ 2570 if (flags & __GFP_WAIT) 2571 down_write(&slub_lock); 2572 else { 2573 if 
(!down_write_trylock(&slub_lock)) 2574 goto out; 2575 } 2576 2577 if (kmalloc_caches_dma[index]) 2578 goto unlock_out; 2579 2580 realsize = kmalloc_caches[index].objsize; 2581 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", 2582 (unsigned int)realsize); 2583 s = kmalloc(kmem_size, flags & ~SLUB_DMA); 2584 2585 if (!s || !text || !kmem_cache_open(s, flags, text, 2586 realsize, ARCH_KMALLOC_MINALIGN, 2587 SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) { 2588 kfree(s); 2589 kfree(text); 2590 goto unlock_out; 2591 } 2592 2593 list_add(&s->list, &slab_caches); 2594 kmalloc_caches_dma[index] = s; 2595 2596 schedule_work(&sysfs_add_work); 2597 2598 unlock_out: 2599 up_write(&slub_lock); 2600 out: 2601 return kmalloc_caches_dma[index]; 2602 } 2603 #endif 2604 2605 /* 2606 * Conversion table for small slabs sizes / 8 to the index in the 2607 * kmalloc array. This is necessary for slabs < 192 since we have non power 2608 * of two cache sizes there. The size of larger slabs can be determined using 2609 * fls. 2610 */ 2611 static s8 size_index[24] = { 2612 3, /* 8 */ 2613 4, /* 16 */ 2614 5, /* 24 */ 2615 5, /* 32 */ 2616 6, /* 40 */ 2617 6, /* 48 */ 2618 6, /* 56 */ 2619 6, /* 64 */ 2620 1, /* 72 */ 2621 1, /* 80 */ 2622 1, /* 88 */ 2623 1, /* 96 */ 2624 7, /* 104 */ 2625 7, /* 112 */ 2626 7, /* 120 */ 2627 7, /* 128 */ 2628 2, /* 136 */ 2629 2, /* 144 */ 2630 2, /* 152 */ 2631 2, /* 160 */ 2632 2, /* 168 */ 2633 2, /* 176 */ 2634 2, /* 184 */ 2635 2 /* 192 */ 2636 }; 2637 2638 static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2639 { 2640 int index; 2641 2642 if (size <= 192) { 2643 if (!size) 2644 return ZERO_SIZE_PTR; 2645 2646 index = size_index[(size - 1) / 8]; 2647 } else 2648 index = fls(size - 1); 2649 2650 #ifdef CONFIG_ZONE_DMA 2651 if (unlikely((flags & SLUB_DMA))) 2652 return dma_kmalloc_cache(index, flags); 2653 2654 #endif 2655 return &kmalloc_caches[index]; 2656 } 2657 2658 void *__kmalloc(size_t size, gfp_t flags) 2659 { 2660 struct kmem_cache *s; 2661 2662 if (unlikely(size > SLUB_MAX_SIZE)) 2663 return kmalloc_large(size, flags); 2664 2665 s = get_slab(size, flags); 2666 2667 if (unlikely(ZERO_OR_NULL_PTR(s))) 2668 return s; 2669 2670 return slab_alloc(s, flags, -1, _RET_IP_); 2671 } 2672 EXPORT_SYMBOL(__kmalloc); 2673 2674 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2675 { 2676 struct page *page = alloc_pages_node(node, flags | __GFP_COMP, 2677 get_order(size)); 2678 2679 if (page) 2680 return page_address(page); 2681 else 2682 return NULL; 2683 } 2684 2685 #ifdef CONFIG_NUMA 2686 void *__kmalloc_node(size_t size, gfp_t flags, int node) 2687 { 2688 struct kmem_cache *s; 2689 2690 if (unlikely(size > SLUB_MAX_SIZE)) 2691 return kmalloc_large_node(size, flags, node); 2692 2693 s = get_slab(size, flags); 2694 2695 if (unlikely(ZERO_OR_NULL_PTR(s))) 2696 return s; 2697 2698 return slab_alloc(s, flags, node, _RET_IP_); 2699 } 2700 EXPORT_SYMBOL(__kmalloc_node); 2701 #endif 2702 2703 size_t ksize(const void *object) 2704 { 2705 struct page *page; 2706 struct kmem_cache *s; 2707 2708 if (unlikely(object == ZERO_SIZE_PTR)) 2709 return 0; 2710 2711 page = virt_to_head_page(object); 2712 2713 if (unlikely(!PageSlab(page))) { 2714 WARN_ON(!PageCompound(page)); 2715 return PAGE_SIZE << compound_order(page); 2716 } 2717 s = page->slab; 2718 2719 #ifdef CONFIG_SLUB_DEBUG 2720 /* 2721 * Debugging requires use of the padding between object 2722 * and whatever may come after it. 
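 *
 * Illustrative behaviour for an object from a hypothetical 64 byte
 * cache: with red zoning or poisoning active ksize() reports only the
 * 64 byte objsize, because the padding behind the object belongs to the
 * debug code; with SLAB_DESTROY_BY_RCU or SLAB_STORE_USER it reports
 * s->inuse, i.e. the bytes up to the free pointer / tracking data; in
 * the remaining cases the caller may use the whole s->size including
 * any alignment padding.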
2723 */ 2724 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 2725 return s->objsize; 2726 2727 #endif 2728 /* 2729 * If we have the need to store the freelist pointer 2730 * back there or track user information then we can 2731 * only use the space before that information. 2732 */ 2733 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 2734 return s->inuse; 2735 /* 2736 * Else we can use all the padding etc for the allocation 2737 */ 2738 return s->size; 2739 } 2740 EXPORT_SYMBOL(ksize); 2741 2742 void kfree(const void *x) 2743 { 2744 struct page *page; 2745 void *object = (void *)x; 2746 2747 if (unlikely(ZERO_OR_NULL_PTR(x))) 2748 return; 2749 2750 page = virt_to_head_page(x); 2751 if (unlikely(!PageSlab(page))) { 2752 BUG_ON(!PageCompound(page)); 2753 put_page(page); 2754 return; 2755 } 2756 slab_free(page->slab, page, object, _RET_IP_); 2757 } 2758 EXPORT_SYMBOL(kfree); 2759 2760 /* 2761 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2762 * the remaining slabs by the number of items in use. The slabs with the 2763 * most items in use come first. New allocations will then fill those up 2764 * and thus they can be removed from the partial lists. 2765 * 2766 * The slabs with the least items are placed last. This results in them 2767 * being allocated from last increasing the chance that the last objects 2768 * are freed in them. 2769 */ 2770 int kmem_cache_shrink(struct kmem_cache *s) 2771 { 2772 int node; 2773 int i; 2774 struct kmem_cache_node *n; 2775 struct page *page; 2776 struct page *t; 2777 int objects = oo_objects(s->max); 2778 struct list_head *slabs_by_inuse = 2779 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); 2780 unsigned long flags; 2781 2782 if (!slabs_by_inuse) 2783 return -ENOMEM; 2784 2785 flush_all(s); 2786 for_each_node_state(node, N_NORMAL_MEMORY) { 2787 n = get_node(s, node); 2788 2789 if (!n->nr_partial) 2790 continue; 2791 2792 for (i = 0; i < objects; i++) 2793 INIT_LIST_HEAD(slabs_by_inuse + i); 2794 2795 spin_lock_irqsave(&n->list_lock, flags); 2796 2797 /* 2798 * Build lists indexed by the items in use in each slab. 2799 * 2800 * Note that concurrent frees may occur while we hold the 2801 * list_lock. page->inuse here is the upper limit. 2802 */ 2803 list_for_each_entry_safe(page, t, &n->partial, lru) { 2804 if (!page->inuse && slab_trylock(page)) { 2805 /* 2806 * Must hold slab lock here because slab_free 2807 * may have freed the last object and be 2808 * waiting to release the slab. 2809 */ 2810 list_del(&page->lru); 2811 n->nr_partial--; 2812 slab_unlock(page); 2813 discard_slab(s, page); 2814 } else { 2815 list_move(&page->lru, 2816 slabs_by_inuse + page->inuse); 2817 } 2818 } 2819 2820 /* 2821 * Rebuild the partial list with the slabs filled up most 2822 * first and the least used slabs at the end. 
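 *
 * Worked example (counts invented): with slabs of capacity 8 holding
 * 3, 0, 7 and 1 objects, the empty slab was discarded above and the
 * remaining three were binned into slabs_by_inuse[3], [7] and [1].
 * Splicing from i = objects - 1 down to 0 rebuilds the partial list in
 * the order 7, 3, 1 objects in use, so new allocations fill the nearly
 * full slab first and the emptiest slab has the best chance of being
 * freed completely.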
2823 */ 2824 for (i = objects - 1; i >= 0; i--) 2825 list_splice(slabs_by_inuse + i, n->partial.prev); 2826 2827 spin_unlock_irqrestore(&n->list_lock, flags); 2828 } 2829 2830 kfree(slabs_by_inuse); 2831 return 0; 2832 } 2833 EXPORT_SYMBOL(kmem_cache_shrink); 2834 2835 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 2836 static int slab_mem_going_offline_callback(void *arg) 2837 { 2838 struct kmem_cache *s; 2839 2840 down_read(&slub_lock); 2841 list_for_each_entry(s, &slab_caches, list) 2842 kmem_cache_shrink(s); 2843 up_read(&slub_lock); 2844 2845 return 0; 2846 } 2847 2848 static void slab_mem_offline_callback(void *arg) 2849 { 2850 struct kmem_cache_node *n; 2851 struct kmem_cache *s; 2852 struct memory_notify *marg = arg; 2853 int offline_node; 2854 2855 offline_node = marg->status_change_nid; 2856 2857 /* 2858 * If the node still has available memory. we need kmem_cache_node 2859 * for it yet. 2860 */ 2861 if (offline_node < 0) 2862 return; 2863 2864 down_read(&slub_lock); 2865 list_for_each_entry(s, &slab_caches, list) { 2866 n = get_node(s, offline_node); 2867 if (n) { 2868 /* 2869 * if n->nr_slabs > 0, slabs still exist on the node 2870 * that is going down. We were unable to free them, 2871 * and offline_pages() function shoudn't call this 2872 * callback. So, we must fail. 2873 */ 2874 BUG_ON(slabs_node(s, offline_node)); 2875 2876 s->node[offline_node] = NULL; 2877 kmem_cache_free(kmalloc_caches, n); 2878 } 2879 } 2880 up_read(&slub_lock); 2881 } 2882 2883 static int slab_mem_going_online_callback(void *arg) 2884 { 2885 struct kmem_cache_node *n; 2886 struct kmem_cache *s; 2887 struct memory_notify *marg = arg; 2888 int nid = marg->status_change_nid; 2889 int ret = 0; 2890 2891 /* 2892 * If the node's memory is already available, then kmem_cache_node is 2893 * already created. Nothing to do. 2894 */ 2895 if (nid < 0) 2896 return 0; 2897 2898 /* 2899 * We are bringing a node online. No memory is available yet. We must 2900 * allocate a kmem_cache_node structure in order to bring the node 2901 * online. 2902 */ 2903 down_read(&slub_lock); 2904 list_for_each_entry(s, &slab_caches, list) { 2905 /* 2906 * XXX: kmem_cache_alloc_node will fallback to other nodes 2907 * since memory is not yet available from the node that 2908 * is brought up. 
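 *
 * Example with hypothetical node numbers: when node 2 is brought
 * online, the kmem_cache_node allocated below necessarily comes from a
 * node that already has memory, say node 0, and is installed as
 * s->node[2]. Slabs backed by node 2 itself can only be allocated once
 * its memory actually becomes available.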
2909 */ 2910 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL); 2911 if (!n) { 2912 ret = -ENOMEM; 2913 goto out; 2914 } 2915 init_kmem_cache_node(n, s); 2916 s->node[nid] = n; 2917 } 2918 out: 2919 up_read(&slub_lock); 2920 return ret; 2921 } 2922 2923 static int slab_memory_callback(struct notifier_block *self, 2924 unsigned long action, void *arg) 2925 { 2926 int ret = 0; 2927 2928 switch (action) { 2929 case MEM_GOING_ONLINE: 2930 ret = slab_mem_going_online_callback(arg); 2931 break; 2932 case MEM_GOING_OFFLINE: 2933 ret = slab_mem_going_offline_callback(arg); 2934 break; 2935 case MEM_OFFLINE: 2936 case MEM_CANCEL_ONLINE: 2937 slab_mem_offline_callback(arg); 2938 break; 2939 case MEM_ONLINE: 2940 case MEM_CANCEL_OFFLINE: 2941 break; 2942 } 2943 if (ret) 2944 ret = notifier_from_errno(ret); 2945 else 2946 ret = NOTIFY_OK; 2947 return ret; 2948 } 2949 2950 #endif /* CONFIG_MEMORY_HOTPLUG */ 2951 2952 /******************************************************************** 2953 * Basic setup of slabs 2954 *******************************************************************/ 2955 2956 void __init kmem_cache_init(void) 2957 { 2958 int i; 2959 int caches = 0; 2960 2961 init_alloc_cpu(); 2962 2963 #ifdef CONFIG_NUMA 2964 /* 2965 * Must first have the slab cache available for the allocations of the 2966 * struct kmem_cache_node's. There is special bootstrap code in 2967 * kmem_cache_open for slab_state == DOWN. 2968 */ 2969 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", 2970 sizeof(struct kmem_cache_node), GFP_KERNEL); 2971 kmalloc_caches[0].refcount = -1; 2972 caches++; 2973 2974 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 2975 #endif 2976 2977 /* Able to allocate the per node structures */ 2978 slab_state = PARTIAL; 2979 2980 /* Caches that are not of the two-to-the-power-of size */ 2981 if (KMALLOC_MIN_SIZE <= 64) { 2982 create_kmalloc_cache(&kmalloc_caches[1], 2983 "kmalloc-96", 96, GFP_KERNEL); 2984 caches++; 2985 create_kmalloc_cache(&kmalloc_caches[2], 2986 "kmalloc-192", 192, GFP_KERNEL); 2987 caches++; 2988 } 2989 2990 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 2991 create_kmalloc_cache(&kmalloc_caches[i], 2992 "kmalloc", 1 << i, GFP_KERNEL); 2993 caches++; 2994 } 2995 2996 2997 /* 2998 * Patch up the size_index table if we have strange large alignment 2999 * requirements for the kmalloc array. This is only the case for 3000 * MIPS it seems. The standard arches will not generate any code here. 3001 * 3002 * Largest permitted alignment is 256 bytes due to the way we 3003 * handle the index determination for the smaller caches. 3004 * 3005 * Make sure that nothing crazy happens if someone starts tinkering 3006 * around with ARCH_KMALLOC_MINALIGN 3007 */ 3008 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || 3009 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); 3010 3011 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) 3012 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW; 3013 3014 if (KMALLOC_MIN_SIZE == 128) { 3015 /* 3016 * The 192 byte sized cache is not used if the alignment 3017 * is 128 byte. Redirect kmalloc to use the 256 byte cache 3018 * instead. 3019 */ 3020 for (i = 128 + 8; i <= 192; i += 8) 3021 size_index[(i - 1) / 8] = 8; 3022 } 3023 3024 slab_state = UP; 3025 3026 /* Provide the correct kmalloc names now that the caches are up */ 3027 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) 3028 kmalloc_caches[i]. 
name = 3029 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); 3030 3031 #ifdef CONFIG_SMP 3032 register_cpu_notifier(&slab_notifier); 3033 kmem_size = offsetof(struct kmem_cache, cpu_slab) + 3034 nr_cpu_ids * sizeof(struct kmem_cache_cpu *); 3035 #else 3036 kmem_size = sizeof(struct kmem_cache); 3037 #endif 3038 3039 printk(KERN_INFO 3040 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3041 " CPUs=%d, Nodes=%d\n", 3042 caches, cache_line_size(), 3043 slub_min_order, slub_max_order, slub_min_objects, 3044 nr_cpu_ids, nr_node_ids); 3045 } 3046 3047 /* 3048 * Find a mergeable slab cache 3049 */ 3050 static int slab_unmergeable(struct kmem_cache *s) 3051 { 3052 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3053 return 1; 3054 3055 if (s->ctor) 3056 return 1; 3057 3058 /* 3059 * We may have set a slab to be unmergeable during bootstrap. 3060 */ 3061 if (s->refcount < 0) 3062 return 1; 3063 3064 return 0; 3065 } 3066 3067 static struct kmem_cache *find_mergeable(size_t size, 3068 size_t align, unsigned long flags, const char *name, 3069 void (*ctor)(void *)) 3070 { 3071 struct kmem_cache *s; 3072 3073 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 3074 return NULL; 3075 3076 if (ctor) 3077 return NULL; 3078 3079 size = ALIGN(size, sizeof(void *)); 3080 align = calculate_alignment(flags, align, size); 3081 size = ALIGN(size, align); 3082 flags = kmem_cache_flags(size, flags, name, NULL); 3083 3084 list_for_each_entry(s, &slab_caches, list) { 3085 if (slab_unmergeable(s)) 3086 continue; 3087 3088 if (size > s->size) 3089 continue; 3090 3091 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3092 continue; 3093 /* 3094 * Check if alignment is compatible. 3095 * Courtesy of Adrian Drzewiecki 3096 */ 3097 if ((s->size & ~(align - 1)) != s->size) 3098 continue; 3099 3100 if (s->size - size >= sizeof(void *)) 3101 continue; 3102 3103 return s; 3104 } 3105 return NULL; 3106 } 3107 3108 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 3109 size_t align, unsigned long flags, void (*ctor)(void *)) 3110 { 3111 struct kmem_cache *s; 3112 3113 down_write(&slub_lock); 3114 s = find_mergeable(size, align, flags, name, ctor); 3115 if (s) { 3116 int cpu; 3117 3118 s->refcount++; 3119 /* 3120 * Adjust the object sizes so that we clear 3121 * the complete object on kzalloc. 
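 *
 * Illustration with made-up sizes: an existing cache with objsize 90
 * and slab object size 96 can satisfy a new kmem_cache_create() request
 * for 92 byte objects, since 92 still fits in the 96 byte slot. The
 * max() below bumps objsize to 92 so that kzalloc() users of the alias
 * get all 92 bytes cleared, and the per cpu copies of objsize are
 * updated to match right afterwards.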
3122 */ 3123 s->objsize = max(s->objsize, (int)size); 3124 3125 /* 3126 * And then we need to update the object size in the 3127 * per cpu structures 3128 */ 3129 for_each_online_cpu(cpu) 3130 get_cpu_slab(s, cpu)->objsize = s->objsize; 3131 3132 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 3133 up_write(&slub_lock); 3134 3135 if (sysfs_slab_alias(s, name)) { 3136 down_write(&slub_lock); 3137 s->refcount--; 3138 up_write(&slub_lock); 3139 goto err; 3140 } 3141 return s; 3142 } 3143 3144 s = kmalloc(kmem_size, GFP_KERNEL); 3145 if (s) { 3146 if (kmem_cache_open(s, GFP_KERNEL, name, 3147 size, align, flags, ctor)) { 3148 list_add(&s->list, &slab_caches); 3149 up_write(&slub_lock); 3150 if (sysfs_slab_add(s)) { 3151 down_write(&slub_lock); 3152 list_del(&s->list); 3153 up_write(&slub_lock); 3154 kfree(s); 3155 goto err; 3156 } 3157 return s; 3158 } 3159 kfree(s); 3160 } 3161 up_write(&slub_lock); 3162 3163 err: 3164 if (flags & SLAB_PANIC) 3165 panic("Cannot create slabcache %s\n", name); 3166 else 3167 s = NULL; 3168 return s; 3169 } 3170 EXPORT_SYMBOL(kmem_cache_create); 3171 3172 #ifdef CONFIG_SMP 3173 /* 3174 * Use the cpu notifier to insure that the cpu slabs are flushed when 3175 * necessary. 3176 */ 3177 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3178 unsigned long action, void *hcpu) 3179 { 3180 long cpu = (long)hcpu; 3181 struct kmem_cache *s; 3182 unsigned long flags; 3183 3184 switch (action) { 3185 case CPU_UP_PREPARE: 3186 case CPU_UP_PREPARE_FROZEN: 3187 init_alloc_cpu_cpu(cpu); 3188 down_read(&slub_lock); 3189 list_for_each_entry(s, &slab_caches, list) 3190 s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu, 3191 GFP_KERNEL); 3192 up_read(&slub_lock); 3193 break; 3194 3195 case CPU_UP_CANCELED: 3196 case CPU_UP_CANCELED_FROZEN: 3197 case CPU_DEAD: 3198 case CPU_DEAD_FROZEN: 3199 down_read(&slub_lock); 3200 list_for_each_entry(s, &slab_caches, list) { 3201 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 3202 3203 local_irq_save(flags); 3204 __flush_cpu_slab(s, cpu); 3205 local_irq_restore(flags); 3206 free_kmem_cache_cpu(c, cpu); 3207 s->cpu_slab[cpu] = NULL; 3208 } 3209 up_read(&slub_lock); 3210 break; 3211 default: 3212 break; 3213 } 3214 return NOTIFY_OK; 3215 } 3216 3217 static struct notifier_block __cpuinitdata slab_notifier = { 3218 .notifier_call = slab_cpuup_callback 3219 }; 3220 3221 #endif 3222 3223 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 3224 { 3225 struct kmem_cache *s; 3226 3227 if (unlikely(size > SLUB_MAX_SIZE)) 3228 return kmalloc_large(size, gfpflags); 3229 3230 s = get_slab(size, gfpflags); 3231 3232 if (unlikely(ZERO_OR_NULL_PTR(s))) 3233 return s; 3234 3235 return slab_alloc(s, gfpflags, -1, caller); 3236 } 3237 3238 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 3239 int node, unsigned long caller) 3240 { 3241 struct kmem_cache *s; 3242 3243 if (unlikely(size > SLUB_MAX_SIZE)) 3244 return kmalloc_large_node(size, gfpflags, node); 3245 3246 s = get_slab(size, gfpflags); 3247 3248 if (unlikely(ZERO_OR_NULL_PTR(s))) 3249 return s; 3250 3251 return slab_alloc(s, gfpflags, node, caller); 3252 } 3253 3254 #ifdef CONFIG_SLUB_DEBUG 3255 static unsigned long count_partial(struct kmem_cache_node *n, 3256 int (*get_count)(struct page *)) 3257 { 3258 unsigned long flags; 3259 unsigned long x = 0; 3260 struct page *page; 3261 3262 spin_lock_irqsave(&n->list_lock, flags); 3263 list_for_each_entry(page, &n->partial, lru) 3264 x += get_count(page); 3265 
spin_unlock_irqrestore(&n->list_lock, flags); 3266 return x; 3267 } 3268 3269 static int count_inuse(struct page *page) 3270 { 3271 return page->inuse; 3272 } 3273 3274 static int count_total(struct page *page) 3275 { 3276 return page->objects; 3277 } 3278 3279 static int count_free(struct page *page) 3280 { 3281 return page->objects - page->inuse; 3282 } 3283 3284 static int validate_slab(struct kmem_cache *s, struct page *page, 3285 unsigned long *map) 3286 { 3287 void *p; 3288 void *addr = page_address(page); 3289 3290 if (!check_slab(s, page) || 3291 !on_freelist(s, page, NULL)) 3292 return 0; 3293 3294 /* Now we know that a valid freelist exists */ 3295 bitmap_zero(map, page->objects); 3296 3297 for_each_free_object(p, s, page->freelist) { 3298 set_bit(slab_index(p, s, addr), map); 3299 if (!check_object(s, page, p, 0)) 3300 return 0; 3301 } 3302 3303 for_each_object(p, s, addr, page->objects) 3304 if (!test_bit(slab_index(p, s, addr), map)) 3305 if (!check_object(s, page, p, 1)) 3306 return 0; 3307 return 1; 3308 } 3309 3310 static void validate_slab_slab(struct kmem_cache *s, struct page *page, 3311 unsigned long *map) 3312 { 3313 if (slab_trylock(page)) { 3314 validate_slab(s, page, map); 3315 slab_unlock(page); 3316 } else 3317 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", 3318 s->name, page); 3319 3320 if (s->flags & DEBUG_DEFAULT_FLAGS) { 3321 if (!PageSlubDebug(page)) 3322 printk(KERN_ERR "SLUB %s: SlubDebug not set " 3323 "on slab 0x%p\n", s->name, page); 3324 } else { 3325 if (PageSlubDebug(page)) 3326 printk(KERN_ERR "SLUB %s: SlubDebug set on " 3327 "slab 0x%p\n", s->name, page); 3328 } 3329 } 3330 3331 static int validate_slab_node(struct kmem_cache *s, 3332 struct kmem_cache_node *n, unsigned long *map) 3333 { 3334 unsigned long count = 0; 3335 struct page *page; 3336 unsigned long flags; 3337 3338 spin_lock_irqsave(&n->list_lock, flags); 3339 3340 list_for_each_entry(page, &n->partial, lru) { 3341 validate_slab_slab(s, page, map); 3342 count++; 3343 } 3344 if (count != n->nr_partial) 3345 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 3346 "counter=%ld\n", s->name, count, n->nr_partial); 3347 3348 if (!(s->flags & SLAB_STORE_USER)) 3349 goto out; 3350 3351 list_for_each_entry(page, &n->full, lru) { 3352 validate_slab_slab(s, page, map); 3353 count++; 3354 } 3355 if (count != atomic_long_read(&n->nr_slabs)) 3356 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 3357 "counter=%ld\n", s->name, count, 3358 atomic_long_read(&n->nr_slabs)); 3359 3360 out: 3361 spin_unlock_irqrestore(&n->list_lock, flags); 3362 return count; 3363 } 3364 3365 static long validate_slab_cache(struct kmem_cache *s) 3366 { 3367 int node; 3368 unsigned long count = 0; 3369 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3370 sizeof(unsigned long), GFP_KERNEL); 3371 3372 if (!map) 3373 return -ENOMEM; 3374 3375 flush_all(s); 3376 for_each_node_state(node, N_NORMAL_MEMORY) { 3377 struct kmem_cache_node *n = get_node(s, node); 3378 3379 count += validate_slab_node(s, n, map); 3380 } 3381 kfree(map); 3382 return count; 3383 } 3384 3385 #ifdef SLUB_RESILIENCY_TEST 3386 static void resiliency_test(void) 3387 { 3388 u8 *p; 3389 3390 printk(KERN_ERR "SLUB resiliency testing\n"); 3391 printk(KERN_ERR "-----------------------\n"); 3392 printk(KERN_ERR "A. Corruption after allocation\n"); 3393 3394 p = kzalloc(16, GFP_KERNEL); 3395 p[16] = 0x12; 3396 printk(KERN_ERR "\n1. 
kmalloc-16: Clobber Redzone/next pointer" 3397 " 0x12->0x%p\n\n", p + 16); 3398 3399 validate_slab_cache(kmalloc_caches + 4); 3400 3401 /* Hmmm... The next two are dangerous */ 3402 p = kzalloc(32, GFP_KERNEL); 3403 p[32 + sizeof(void *)] = 0x34; 3404 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" 3405 " 0x34 -> -0x%p\n", p); 3406 printk(KERN_ERR 3407 "If allocated object is overwritten then not detectable\n\n"); 3408 3409 validate_slab_cache(kmalloc_caches + 5); 3410 p = kzalloc(64, GFP_KERNEL); 3411 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 3412 *p = 0x56; 3413 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 3414 p); 3415 printk(KERN_ERR 3416 "If allocated object is overwritten then not detectable\n\n"); 3417 validate_slab_cache(kmalloc_caches + 6); 3418 3419 printk(KERN_ERR "\nB. Corruption after free\n"); 3420 p = kzalloc(128, GFP_KERNEL); 3421 kfree(p); 3422 *p = 0x78; 3423 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 3424 validate_slab_cache(kmalloc_caches + 7); 3425 3426 p = kzalloc(256, GFP_KERNEL); 3427 kfree(p); 3428 p[50] = 0x9a; 3429 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", 3430 p); 3431 validate_slab_cache(kmalloc_caches + 8); 3432 3433 p = kzalloc(512, GFP_KERNEL); 3434 kfree(p); 3435 p[512] = 0xab; 3436 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 3437 validate_slab_cache(kmalloc_caches + 9); 3438 } 3439 #else 3440 static void resiliency_test(void) {}; 3441 #endif 3442 3443 /* 3444 * Generate lists of code addresses where slabcache objects are allocated 3445 * and freed. 3446 */ 3447 3448 struct location { 3449 unsigned long count; 3450 unsigned long addr; 3451 long long sum_time; 3452 long min_time; 3453 long max_time; 3454 long min_pid; 3455 long max_pid; 3456 DECLARE_BITMAP(cpus, NR_CPUS); 3457 nodemask_t nodes; 3458 }; 3459 3460 struct loc_track { 3461 unsigned long max; 3462 unsigned long count; 3463 struct location *loc; 3464 }; 3465 3466 static void free_loc_track(struct loc_track *t) 3467 { 3468 if (t->max) 3469 free_pages((unsigned long)t->loc, 3470 get_order(sizeof(struct location) * t->max)); 3471 } 3472 3473 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 3474 { 3475 struct location *l; 3476 int order; 3477 3478 order = get_order(sizeof(struct location) * max); 3479 3480 l = (void *)__get_free_pages(flags, order); 3481 if (!l) 3482 return 0; 3483 3484 if (t->count) { 3485 memcpy(l, t->loc, sizeof(struct location) * t->count); 3486 free_loc_track(t); 3487 } 3488 t->max = max; 3489 t->loc = l; 3490 return 1; 3491 } 3492 3493 static int add_location(struct loc_track *t, struct kmem_cache *s, 3494 const struct track *track) 3495 { 3496 long start, end, pos; 3497 struct location *l; 3498 unsigned long caddr; 3499 unsigned long age = jiffies - track->when; 3500 3501 start = -1; 3502 end = t->count; 3503 3504 for ( ; ; ) { 3505 pos = start + (end - start + 1) / 2; 3506 3507 /* 3508 * There is nothing at "end". If we end up there 3509 * we need to add something to before end. 
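 *
 * Small example of the search (addresses invented): with
 * t->loc[].addr = { 0x100, 0x200, 0x300 } and track->addr = 0x250,
 * start and end converge until pos == end == 2, which is the slot where
 * 0x250 belongs. The entries from pos onwards are then shifted up by
 * one and a fresh location record is written at pos; an exact address
 * match would instead have updated the counters of the existing entry.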
3510 */ 3511 if (pos == end) 3512 break; 3513 3514 caddr = t->loc[pos].addr; 3515 if (track->addr == caddr) { 3516 3517 l = &t->loc[pos]; 3518 l->count++; 3519 if (track->when) { 3520 l->sum_time += age; 3521 if (age < l->min_time) 3522 l->min_time = age; 3523 if (age > l->max_time) 3524 l->max_time = age; 3525 3526 if (track->pid < l->min_pid) 3527 l->min_pid = track->pid; 3528 if (track->pid > l->max_pid) 3529 l->max_pid = track->pid; 3530 3531 cpumask_set_cpu(track->cpu, 3532 to_cpumask(l->cpus)); 3533 } 3534 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3535 return 1; 3536 } 3537 3538 if (track->addr < caddr) 3539 end = pos; 3540 else 3541 start = pos; 3542 } 3543 3544 /* 3545 * Not found. Insert new tracking element. 3546 */ 3547 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 3548 return 0; 3549 3550 l = t->loc + pos; 3551 if (pos < t->count) 3552 memmove(l + 1, l, 3553 (t->count - pos) * sizeof(struct location)); 3554 t->count++; 3555 l->count = 1; 3556 l->addr = track->addr; 3557 l->sum_time = age; 3558 l->min_time = age; 3559 l->max_time = age; 3560 l->min_pid = track->pid; 3561 l->max_pid = track->pid; 3562 cpumask_clear(to_cpumask(l->cpus)); 3563 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 3564 nodes_clear(l->nodes); 3565 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3566 return 1; 3567 } 3568 3569 static void process_slab(struct loc_track *t, struct kmem_cache *s, 3570 struct page *page, enum track_item alloc) 3571 { 3572 void *addr = page_address(page); 3573 DECLARE_BITMAP(map, page->objects); 3574 void *p; 3575 3576 bitmap_zero(map, page->objects); 3577 for_each_free_object(p, s, page->freelist) 3578 set_bit(slab_index(p, s, addr), map); 3579 3580 for_each_object(p, s, addr, page->objects) 3581 if (!test_bit(slab_index(p, s, addr), map)) 3582 add_location(t, s, get_track(s, p, alloc)); 3583 } 3584 3585 static int list_locations(struct kmem_cache *s, char *buf, 3586 enum track_item alloc) 3587 { 3588 int len = 0; 3589 unsigned long i; 3590 struct loc_track t = { 0, 0, NULL }; 3591 int node; 3592 3593 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 3594 GFP_TEMPORARY)) 3595 return sprintf(buf, "Out of memory\n"); 3596 3597 /* Push back cpu slabs */ 3598 flush_all(s); 3599 3600 for_each_node_state(node, N_NORMAL_MEMORY) { 3601 struct kmem_cache_node *n = get_node(s, node); 3602 unsigned long flags; 3603 struct page *page; 3604 3605 if (!atomic_long_read(&n->nr_slabs)) 3606 continue; 3607 3608 spin_lock_irqsave(&n->list_lock, flags); 3609 list_for_each_entry(page, &n->partial, lru) 3610 process_slab(&t, s, page, alloc); 3611 list_for_each_entry(page, &n->full, lru) 3612 process_slab(&t, s, page, alloc); 3613 spin_unlock_irqrestore(&n->list_lock, flags); 3614 } 3615 3616 for (i = 0; i < t.count; i++) { 3617 struct location *l = &t.loc[i]; 3618 3619 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) 3620 break; 3621 len += sprintf(buf + len, "%7ld ", l->count); 3622 3623 if (l->addr) 3624 len += sprint_symbol(buf + len, (unsigned long)l->addr); 3625 else 3626 len += sprintf(buf + len, "<not-available>"); 3627 3628 if (l->sum_time != l->min_time) { 3629 len += sprintf(buf + len, " age=%ld/%ld/%ld", 3630 l->min_time, 3631 (long)div_u64(l->sum_time, l->count), 3632 l->max_time); 3633 } else 3634 len += sprintf(buf + len, " age=%ld", 3635 l->min_time); 3636 3637 if (l->min_pid != l->max_pid) 3638 len += sprintf(buf + len, " pid=%ld-%ld", 3639 l->min_pid, l->max_pid); 3640 else 3641 len += sprintf(buf + len, " pid=%ld", 3642 
l->min_pid); 3643 3644 if (num_online_cpus() > 1 && 3645 !cpumask_empty(to_cpumask(l->cpus)) && 3646 len < PAGE_SIZE - 60) { 3647 len += sprintf(buf + len, " cpus="); 3648 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3649 to_cpumask(l->cpus)); 3650 } 3651 3652 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && 3653 len < PAGE_SIZE - 60) { 3654 len += sprintf(buf + len, " nodes="); 3655 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3656 l->nodes); 3657 } 3658 3659 len += sprintf(buf + len, "\n"); 3660 } 3661 3662 free_loc_track(&t); 3663 if (!t.count) 3664 len += sprintf(buf, "No data\n"); 3665 return len; 3666 } 3667 3668 enum slab_stat_type { 3669 SL_ALL, /* All slabs */ 3670 SL_PARTIAL, /* Only partially allocated slabs */ 3671 SL_CPU, /* Only slabs used for cpu caches */ 3672 SL_OBJECTS, /* Determine allocated objects not slabs */ 3673 SL_TOTAL /* Determine object capacity not slabs */ 3674 }; 3675 3676 #define SO_ALL (1 << SL_ALL) 3677 #define SO_PARTIAL (1 << SL_PARTIAL) 3678 #define SO_CPU (1 << SL_CPU) 3679 #define SO_OBJECTS (1 << SL_OBJECTS) 3680 #define SO_TOTAL (1 << SL_TOTAL) 3681 3682 static ssize_t show_slab_objects(struct kmem_cache *s, 3683 char *buf, unsigned long flags) 3684 { 3685 unsigned long total = 0; 3686 int node; 3687 int x; 3688 unsigned long *nodes; 3689 unsigned long *per_cpu; 3690 3691 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 3692 if (!nodes) 3693 return -ENOMEM; 3694 per_cpu = nodes + nr_node_ids; 3695 3696 if (flags & SO_CPU) { 3697 int cpu; 3698 3699 for_each_possible_cpu(cpu) { 3700 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 3701 3702 if (!c || c->node < 0) 3703 continue; 3704 3705 if (c->page) { 3706 if (flags & SO_TOTAL) 3707 x = c->page->objects; 3708 else if (flags & SO_OBJECTS) 3709 x = c->page->inuse; 3710 else 3711 x = 1; 3712 3713 total += x; 3714 nodes[c->node] += x; 3715 } 3716 per_cpu[c->node]++; 3717 } 3718 } 3719 3720 if (flags & SO_ALL) { 3721 for_each_node_state(node, N_NORMAL_MEMORY) { 3722 struct kmem_cache_node *n = get_node(s, node); 3723 3724 if (flags & SO_TOTAL) 3725 x = atomic_long_read(&n->total_objects); 3726 else if (flags & SO_OBJECTS) 3727 x = atomic_long_read(&n->total_objects) - 3728 count_partial(n, count_free); 3729 3730 else 3731 x = atomic_long_read(&n->nr_slabs); 3732 total += x; 3733 nodes[node] += x; 3734 } 3735 3736 } else if (flags & SO_PARTIAL) { 3737 for_each_node_state(node, N_NORMAL_MEMORY) { 3738 struct kmem_cache_node *n = get_node(s, node); 3739 3740 if (flags & SO_TOTAL) 3741 x = count_partial(n, count_total); 3742 else if (flags & SO_OBJECTS) 3743 x = count_partial(n, count_inuse); 3744 else 3745 x = n->nr_partial; 3746 total += x; 3747 nodes[node] += x; 3748 } 3749 } 3750 x = sprintf(buf, "%lu", total); 3751 #ifdef CONFIG_NUMA 3752 for_each_node_state(node, N_NORMAL_MEMORY) 3753 if (nodes[node]) 3754 x += sprintf(buf + x, " N%d=%lu", 3755 node, nodes[node]); 3756 #endif 3757 kfree(nodes); 3758 return x + sprintf(buf + x, "\n"); 3759 } 3760 3761 static int any_slab_objects(struct kmem_cache *s) 3762 { 3763 int node; 3764 3765 for_each_online_node(node) { 3766 struct kmem_cache_node *n = get_node(s, node); 3767 3768 if (!n) 3769 continue; 3770 3771 if (atomic_long_read(&n->total_objects)) 3772 return 1; 3773 } 3774 return 0; 3775 } 3776 3777 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 3778 #define to_slab(n) container_of(n, struct kmem_cache, kobj); 3779 3780 struct slab_attribute { 3781 struct attribute attr; 3782 
ssize_t (*show)(struct kmem_cache *s, char *buf); 3783 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 3784 }; 3785 3786 #define SLAB_ATTR_RO(_name) \ 3787 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 3788 3789 #define SLAB_ATTR(_name) \ 3790 static struct slab_attribute _name##_attr = \ 3791 __ATTR(_name, 0644, _name##_show, _name##_store) 3792 3793 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 3794 { 3795 return sprintf(buf, "%d\n", s->size); 3796 } 3797 SLAB_ATTR_RO(slab_size); 3798 3799 static ssize_t align_show(struct kmem_cache *s, char *buf) 3800 { 3801 return sprintf(buf, "%d\n", s->align); 3802 } 3803 SLAB_ATTR_RO(align); 3804 3805 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 3806 { 3807 return sprintf(buf, "%d\n", s->objsize); 3808 } 3809 SLAB_ATTR_RO(object_size); 3810 3811 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 3812 { 3813 return sprintf(buf, "%d\n", oo_objects(s->oo)); 3814 } 3815 SLAB_ATTR_RO(objs_per_slab); 3816 3817 static ssize_t order_store(struct kmem_cache *s, 3818 const char *buf, size_t length) 3819 { 3820 unsigned long order; 3821 int err; 3822 3823 err = strict_strtoul(buf, 10, &order); 3824 if (err) 3825 return err; 3826 3827 if (order > slub_max_order || order < slub_min_order) 3828 return -EINVAL; 3829 3830 calculate_sizes(s, order); 3831 return length; 3832 } 3833 3834 static ssize_t order_show(struct kmem_cache *s, char *buf) 3835 { 3836 return sprintf(buf, "%d\n", oo_order(s->oo)); 3837 } 3838 SLAB_ATTR(order); 3839 3840 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 3841 { 3842 return sprintf(buf, "%lu\n", s->min_partial); 3843 } 3844 3845 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 3846 size_t length) 3847 { 3848 unsigned long min; 3849 int err; 3850 3851 err = strict_strtoul(buf, 10, &min); 3852 if (err) 3853 return err; 3854 3855 set_min_partial(s, min); 3856 return length; 3857 } 3858 SLAB_ATTR(min_partial); 3859 3860 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 3861 { 3862 if (s->ctor) { 3863 int n = sprint_symbol(buf, (unsigned long)s->ctor); 3864 3865 return n + sprintf(buf + n, "\n"); 3866 } 3867 return 0; 3868 } 3869 SLAB_ATTR_RO(ctor); 3870 3871 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3872 { 3873 return sprintf(buf, "%d\n", s->refcount - 1); 3874 } 3875 SLAB_ATTR_RO(aliases); 3876 3877 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 3878 { 3879 return show_slab_objects(s, buf, SO_ALL); 3880 } 3881 SLAB_ATTR_RO(slabs); 3882 3883 static ssize_t partial_show(struct kmem_cache *s, char *buf) 3884 { 3885 return show_slab_objects(s, buf, SO_PARTIAL); 3886 } 3887 SLAB_ATTR_RO(partial); 3888 3889 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 3890 { 3891 return show_slab_objects(s, buf, SO_CPU); 3892 } 3893 SLAB_ATTR_RO(cpu_slabs); 3894 3895 static ssize_t objects_show(struct kmem_cache *s, char *buf) 3896 { 3897 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 3898 } 3899 SLAB_ATTR_RO(objects); 3900 3901 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 3902 { 3903 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 3904 } 3905 SLAB_ATTR_RO(objects_partial); 3906 3907 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 3908 { 3909 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 3910 } 3911 SLAB_ATTR_RO(total_objects); 3912 3913 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 3914 { 3915 
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 3916 } 3917 3918 static ssize_t sanity_checks_store(struct kmem_cache *s, 3919 const char *buf, size_t length) 3920 { 3921 s->flags &= ~SLAB_DEBUG_FREE; 3922 if (buf[0] == '1') 3923 s->flags |= SLAB_DEBUG_FREE; 3924 return length; 3925 } 3926 SLAB_ATTR(sanity_checks); 3927 3928 static ssize_t trace_show(struct kmem_cache *s, char *buf) 3929 { 3930 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 3931 } 3932 3933 static ssize_t trace_store(struct kmem_cache *s, const char *buf, 3934 size_t length) 3935 { 3936 s->flags &= ~SLAB_TRACE; 3937 if (buf[0] == '1') 3938 s->flags |= SLAB_TRACE; 3939 return length; 3940 } 3941 SLAB_ATTR(trace); 3942 3943 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 3944 { 3945 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 3946 } 3947 3948 static ssize_t reclaim_account_store(struct kmem_cache *s, 3949 const char *buf, size_t length) 3950 { 3951 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 3952 if (buf[0] == '1') 3953 s->flags |= SLAB_RECLAIM_ACCOUNT; 3954 return length; 3955 } 3956 SLAB_ATTR(reclaim_account); 3957 3958 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 3959 { 3960 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 3961 } 3962 SLAB_ATTR_RO(hwcache_align); 3963 3964 #ifdef CONFIG_ZONE_DMA 3965 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 3966 { 3967 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 3968 } 3969 SLAB_ATTR_RO(cache_dma); 3970 #endif 3971 3972 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 3973 { 3974 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 3975 } 3976 SLAB_ATTR_RO(destroy_by_rcu); 3977 3978 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 3979 { 3980 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 3981 } 3982 3983 static ssize_t red_zone_store(struct kmem_cache *s, 3984 const char *buf, size_t length) 3985 { 3986 if (any_slab_objects(s)) 3987 return -EBUSY; 3988 3989 s->flags &= ~SLAB_RED_ZONE; 3990 if (buf[0] == '1') 3991 s->flags |= SLAB_RED_ZONE; 3992 calculate_sizes(s, -1); 3993 return length; 3994 } 3995 SLAB_ATTR(red_zone); 3996 3997 static ssize_t poison_show(struct kmem_cache *s, char *buf) 3998 { 3999 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 4000 } 4001 4002 static ssize_t poison_store(struct kmem_cache *s, 4003 const char *buf, size_t length) 4004 { 4005 if (any_slab_objects(s)) 4006 return -EBUSY; 4007 4008 s->flags &= ~SLAB_POISON; 4009 if (buf[0] == '1') 4010 s->flags |= SLAB_POISON; 4011 calculate_sizes(s, -1); 4012 return length; 4013 } 4014 SLAB_ATTR(poison); 4015 4016 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 4017 { 4018 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 4019 } 4020 4021 static ssize_t store_user_store(struct kmem_cache *s, 4022 const char *buf, size_t length) 4023 { 4024 if (any_slab_objects(s)) 4025 return -EBUSY; 4026 4027 s->flags &= ~SLAB_STORE_USER; 4028 if (buf[0] == '1') 4029 s->flags |= SLAB_STORE_USER; 4030 calculate_sizes(s, -1); 4031 return length; 4032 } 4033 SLAB_ATTR(store_user); 4034 4035 static ssize_t validate_show(struct kmem_cache *s, char *buf) 4036 { 4037 return 0; 4038 } 4039 4040 static ssize_t validate_store(struct kmem_cache *s, 4041 const char *buf, size_t length) 4042 { 4043 int ret = -EINVAL; 4044 4045 if (buf[0] == '1') { 4046 ret = validate_slab_cache(s); 4047 if (ret >= 0) 4048 ret = length; 4049 } 
4050 return ret; 4051 } 4052 SLAB_ATTR(validate); 4053 4054 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4055 { 4056 return 0; 4057 } 4058 4059 static ssize_t shrink_store(struct kmem_cache *s, 4060 const char *buf, size_t length) 4061 { 4062 if (buf[0] == '1') { 4063 int rc = kmem_cache_shrink(s); 4064 4065 if (rc) 4066 return rc; 4067 } else 4068 return -EINVAL; 4069 return length; 4070 } 4071 SLAB_ATTR(shrink); 4072 4073 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 4074 { 4075 if (!(s->flags & SLAB_STORE_USER)) 4076 return -ENOSYS; 4077 return list_locations(s, buf, TRACK_ALLOC); 4078 } 4079 SLAB_ATTR_RO(alloc_calls); 4080 4081 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 4082 { 4083 if (!(s->flags & SLAB_STORE_USER)) 4084 return -ENOSYS; 4085 return list_locations(s, buf, TRACK_FREE); 4086 } 4087 SLAB_ATTR_RO(free_calls); 4088 4089 #ifdef CONFIG_NUMA 4090 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4091 { 4092 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 4093 } 4094 4095 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 4096 const char *buf, size_t length) 4097 { 4098 unsigned long ratio; 4099 int err; 4100 4101 err = strict_strtoul(buf, 10, &ratio); 4102 if (err) 4103 return err; 4104 4105 if (ratio <= 100) 4106 s->remote_node_defrag_ratio = ratio * 10; 4107 4108 return length; 4109 } 4110 SLAB_ATTR(remote_node_defrag_ratio); 4111 #endif 4112 4113 #ifdef CONFIG_SLUB_STATS 4114 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 4115 { 4116 unsigned long sum = 0; 4117 int cpu; 4118 int len; 4119 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 4120 4121 if (!data) 4122 return -ENOMEM; 4123 4124 for_each_online_cpu(cpu) { 4125 unsigned x = get_cpu_slab(s, cpu)->stat[si]; 4126 4127 data[cpu] = x; 4128 sum += x; 4129 } 4130 4131 len = sprintf(buf, "%lu", sum); 4132 4133 #ifdef CONFIG_SMP 4134 for_each_online_cpu(cpu) { 4135 if (data[cpu] && len < PAGE_SIZE - 20) 4136 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 4137 } 4138 #endif 4139 kfree(data); 4140 return len + sprintf(buf + len, "\n"); 4141 } 4142 4143 #define STAT_ATTR(si, text) \ 4144 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 4145 { \ 4146 return show_stat(s, buf, si); \ 4147 } \ 4148 SLAB_ATTR_RO(text); \ 4149 4150 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 4151 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 4152 STAT_ATTR(FREE_FASTPATH, free_fastpath); 4153 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 4154 STAT_ATTR(FREE_FROZEN, free_frozen); 4155 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 4156 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 4157 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 4158 STAT_ATTR(ALLOC_SLAB, alloc_slab); 4159 STAT_ATTR(ALLOC_REFILL, alloc_refill); 4160 STAT_ATTR(FREE_SLAB, free_slab); 4161 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 4162 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 4163 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 4164 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 4165 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 4166 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 4167 STAT_ATTR(ORDER_FALLBACK, order_fallback); 4168 #endif 4169 4170 static struct attribute *slab_attrs[] = { 4171 &slab_size_attr.attr, 4172 &object_size_attr.attr, 4173 &objs_per_slab_attr.attr, 4174 &order_attr.attr, 4175 &min_partial_attr.attr, 4176 &objects_attr.attr, 4177 &objects_partial_attr.attr, 4178 
&total_objects_attr.attr, 4179 &slabs_attr.attr, 4180 &partial_attr.attr, 4181 &cpu_slabs_attr.attr, 4182 &ctor_attr.attr, 4183 &aliases_attr.attr, 4184 &align_attr.attr, 4185 &sanity_checks_attr.attr, 4186 &trace_attr.attr, 4187 &hwcache_align_attr.attr, 4188 &reclaim_account_attr.attr, 4189 &destroy_by_rcu_attr.attr, 4190 &red_zone_attr.attr, 4191 &poison_attr.attr, 4192 &store_user_attr.attr, 4193 &validate_attr.attr, 4194 &shrink_attr.attr, 4195 &alloc_calls_attr.attr, 4196 &free_calls_attr.attr, 4197 #ifdef CONFIG_ZONE_DMA 4198 &cache_dma_attr.attr, 4199 #endif 4200 #ifdef CONFIG_NUMA 4201 &remote_node_defrag_ratio_attr.attr, 4202 #endif 4203 #ifdef CONFIG_SLUB_STATS 4204 &alloc_fastpath_attr.attr, 4205 &alloc_slowpath_attr.attr, 4206 &free_fastpath_attr.attr, 4207 &free_slowpath_attr.attr, 4208 &free_frozen_attr.attr, 4209 &free_add_partial_attr.attr, 4210 &free_remove_partial_attr.attr, 4211 &alloc_from_partial_attr.attr, 4212 &alloc_slab_attr.attr, 4213 &alloc_refill_attr.attr, 4214 &free_slab_attr.attr, 4215 &cpuslab_flush_attr.attr, 4216 &deactivate_full_attr.attr, 4217 &deactivate_empty_attr.attr, 4218 &deactivate_to_head_attr.attr, 4219 &deactivate_to_tail_attr.attr, 4220 &deactivate_remote_frees_attr.attr, 4221 &order_fallback_attr.attr, 4222 #endif 4223 NULL 4224 }; 4225 4226 static struct attribute_group slab_attr_group = { 4227 .attrs = slab_attrs, 4228 }; 4229 4230 static ssize_t slab_attr_show(struct kobject *kobj, 4231 struct attribute *attr, 4232 char *buf) 4233 { 4234 struct slab_attribute *attribute; 4235 struct kmem_cache *s; 4236 int err; 4237 4238 attribute = to_slab_attr(attr); 4239 s = to_slab(kobj); 4240 4241 if (!attribute->show) 4242 return -EIO; 4243 4244 err = attribute->show(s, buf); 4245 4246 return err; 4247 } 4248 4249 static ssize_t slab_attr_store(struct kobject *kobj, 4250 struct attribute *attr, 4251 const char *buf, size_t len) 4252 { 4253 struct slab_attribute *attribute; 4254 struct kmem_cache *s; 4255 int err; 4256 4257 attribute = to_slab_attr(attr); 4258 s = to_slab(kobj); 4259 4260 if (!attribute->store) 4261 return -EIO; 4262 4263 err = attribute->store(s, buf, len); 4264 4265 return err; 4266 } 4267 4268 static void kmem_cache_release(struct kobject *kobj) 4269 { 4270 struct kmem_cache *s = to_slab(kobj); 4271 4272 kfree(s); 4273 } 4274 4275 static struct sysfs_ops slab_sysfs_ops = { 4276 .show = slab_attr_show, 4277 .store = slab_attr_store, 4278 }; 4279 4280 static struct kobj_type slab_ktype = { 4281 .sysfs_ops = &slab_sysfs_ops, 4282 .release = kmem_cache_release 4283 }; 4284 4285 static int uevent_filter(struct kset *kset, struct kobject *kobj) 4286 { 4287 struct kobj_type *ktype = get_ktype(kobj); 4288 4289 if (ktype == &slab_ktype) 4290 return 1; 4291 return 0; 4292 } 4293 4294 static struct kset_uevent_ops slab_uevent_ops = { 4295 .filter = uevent_filter, 4296 }; 4297 4298 static struct kset *slab_kset; 4299 4300 #define ID_STR_LENGTH 64 4301 4302 /* Create a unique string id for a slab cache: 4303 * 4304 * Format :[flags-]size 4305 */ 4306 static char *create_unique_id(struct kmem_cache *s) 4307 { 4308 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 4309 char *p = name; 4310 4311 BUG_ON(!name); 4312 4313 *p++ = ':'; 4314 /* 4315 * First flags affecting slabcache operations. We will only 4316 * get here for aliasable slabs so we do not need to support 4317 * too many flags. The flags here must cover all flags that 4318 * are matched during merging to guarantee that the id is 4319 * unique. 
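 *
 * Examples of the resulting ids (sizes chosen for illustration): a
 * plain cache of size 4096 becomes ":0004096", while a DMA,
 * reclaim-accountable cache of size 192 becomes ":da-0000192". The
 * leading ':' keeps these generated names apart from ordinary cache
 * names, and the flag letters ensure that two caches which must not be
 * merged never end up with the same id.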
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		return err;
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;
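/*
 * Net effect in sysfs (illustrative): a mergeable cache with 192-byte
 * objects and no special flags appears once as /sys/kernel/slab/:0000192,
 * and every cache name that was merged into it becomes a symlink pointing
 * at that directory.  Unmergeable caches (typically the debug case) keep
 * their proper names.  sysfs_slab_alias() below creates such a link, or
 * queues it on alias_list when sysfs is not up yet.
 */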
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		"<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	nr_inuse = nr_objs - nr_free;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, oo_objects(s->oo),
		   (1 << oo_order(s->oo)));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}
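/*
 * A single emitted line looks roughly like this (values hypothetical):
 *
 *	kmalloc-192  1032  1134  192  21  1 : tunables 0 0 0 : slabdata 54 54 0
 *
 * SLUB has no per-cache tunables or shared arrays, so the tunables fields
 * are always reported as zero, active_slabs is reported equal to num_slabs
 * and sharedavail is always zero.
 */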
static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IWUSR | S_IRUGO, NULL,
			&proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */