/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemtrace.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added to or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor may the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used.
 * If an object in a full slab is freed then the slab will show up again
 * on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup are
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG 1
#else
#define SLABDEBUG 0
#endif

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
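 * (set_min_partial() clamps each cache's s->min_partial to the
 * MIN_PARTIAL..MAX_PARTIAL range.)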
137 */ 138 #define MAX_PARTIAL 10 139 140 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \ 141 SLAB_POISON | SLAB_STORE_USER) 142 143 /* 144 * Set of flags that will prevent slab merging 145 */ 146 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 147 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE) 148 149 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 150 SLAB_CACHE_DMA) 151 152 #ifndef ARCH_KMALLOC_MINALIGN 153 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 154 #endif 155 156 #ifndef ARCH_SLAB_MINALIGN 157 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 158 #endif 159 160 #define OO_SHIFT 16 161 #define OO_MASK ((1 << OO_SHIFT) - 1) 162 #define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */ 163 164 /* Internal SLUB flags */ 165 #define __OBJECT_POISON 0x80000000 /* Poison object */ 166 #define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */ 167 168 static int kmem_size = sizeof(struct kmem_cache); 169 170 #ifdef CONFIG_SMP 171 static struct notifier_block slab_notifier; 172 #endif 173 174 static enum { 175 DOWN, /* No slab functionality available */ 176 PARTIAL, /* kmem_cache_open() works but kmalloc does not */ 177 UP, /* Everything works but does not show up in sysfs */ 178 SYSFS /* Sysfs up */ 179 } slab_state = DOWN; 180 181 /* 182 * The slab allocator is initialized with interrupts disabled. Therefore, make 183 * sure early boot allocations don't accidentally enable interrupts. 184 */ 185 static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; 186 187 /* A list of all slab caches on the system */ 188 static DECLARE_RWSEM(slub_lock); 189 static LIST_HEAD(slab_caches); 190 191 /* 192 * Tracking user of a slab. 193 */ 194 struct track { 195 unsigned long addr; /* Called from address */ 196 int cpu; /* Was running on cpu */ 197 int pid; /* Pid context */ 198 unsigned long when; /* When did the operation occur */ 199 }; 200 201 enum track_item { TRACK_ALLOC, TRACK_FREE }; 202 203 #ifdef CONFIG_SLUB_DEBUG 204 static int sysfs_slab_add(struct kmem_cache *); 205 static int sysfs_slab_alias(struct kmem_cache *, const char *); 206 static void sysfs_slab_remove(struct kmem_cache *); 207 208 #else 209 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 210 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 211 { return 0; } 212 static inline void sysfs_slab_remove(struct kmem_cache *s) 213 { 214 kfree(s); 215 } 216 217 #endif 218 219 static inline void stat(struct kmem_cache_cpu *c, enum stat_item si) 220 { 221 #ifdef CONFIG_SLUB_STATS 222 c->stat[si]++; 223 #endif 224 } 225 226 /******************************************************************** 227 * Core slab cache functions 228 *******************************************************************/ 229 230 int slab_is_available(void) 231 { 232 return slab_state >= UP; 233 } 234 235 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 236 { 237 #ifdef CONFIG_NUMA 238 return s->node[node]; 239 #else 240 return &s->local_node; 241 #endif 242 } 243 244 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu) 245 { 246 #ifdef CONFIG_SMP 247 return s->cpu_slab[cpu]; 248 #else 249 return &s->cpu_slab; 250 #endif 251 } 252 253 /* Verify that a pointer has an address that is valid within a slab page */ 254 static inline int check_valid_pointer(struct kmem_cache *s, 255 struct page *page, const void *object) 256 { 257 void *base; 258 259 if 
(!object) 260 return 1; 261 262 base = page_address(page); 263 if (object < base || object >= base + page->objects * s->size || 264 (object - base) % s->size) { 265 return 0; 266 } 267 268 return 1; 269 } 270 271 /* 272 * Slow version of get and set free pointer. 273 * 274 * This version requires touching the cache lines of kmem_cache which 275 * we avoid to do in the fast alloc free paths. There we obtain the offset 276 * from the page struct. 277 */ 278 static inline void *get_freepointer(struct kmem_cache *s, void *object) 279 { 280 return *(void **)(object + s->offset); 281 } 282 283 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 284 { 285 *(void **)(object + s->offset) = fp; 286 } 287 288 /* Loop over all objects in a slab */ 289 #define for_each_object(__p, __s, __addr, __objects) \ 290 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ 291 __p += (__s)->size) 292 293 /* Scan freelist */ 294 #define for_each_free_object(__p, __s, __free) \ 295 for (__p = (__free); __p; __p = get_freepointer((__s), __p)) 296 297 /* Determine object index from a given position */ 298 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 299 { 300 return (p - addr) / s->size; 301 } 302 303 static inline struct kmem_cache_order_objects oo_make(int order, 304 unsigned long size) 305 { 306 struct kmem_cache_order_objects x = { 307 (order << OO_SHIFT) + (PAGE_SIZE << order) / size 308 }; 309 310 return x; 311 } 312 313 static inline int oo_order(struct kmem_cache_order_objects x) 314 { 315 return x.x >> OO_SHIFT; 316 } 317 318 static inline int oo_objects(struct kmem_cache_order_objects x) 319 { 320 return x.x & OO_MASK; 321 } 322 323 #ifdef CONFIG_SLUB_DEBUG 324 /* 325 * Debug settings: 326 */ 327 #ifdef CONFIG_SLUB_DEBUG_ON 328 static int slub_debug = DEBUG_DEFAULT_FLAGS; 329 #else 330 static int slub_debug; 331 #endif 332 333 static char *slub_debug_slabs; 334 335 /* 336 * Object debugging 337 */ 338 static void print_section(char *text, u8 *addr, unsigned int length) 339 { 340 int i, offset; 341 int newline = 1; 342 char ascii[17]; 343 344 ascii[16] = 0; 345 346 for (i = 0; i < length; i++) { 347 if (newline) { 348 printk(KERN_ERR "%8s 0x%p: ", text, addr + i); 349 newline = 0; 350 } 351 printk(KERN_CONT " %02x", addr[i]); 352 offset = i % 16; 353 ascii[offset] = isgraph(addr[i]) ? 
addr[i] : '.'; 354 if (offset == 15) { 355 printk(KERN_CONT " %s\n", ascii); 356 newline = 1; 357 } 358 } 359 if (!newline) { 360 i %= 16; 361 while (i < 16) { 362 printk(KERN_CONT " "); 363 ascii[i] = ' '; 364 i++; 365 } 366 printk(KERN_CONT " %s\n", ascii); 367 } 368 } 369 370 static struct track *get_track(struct kmem_cache *s, void *object, 371 enum track_item alloc) 372 { 373 struct track *p; 374 375 if (s->offset) 376 p = object + s->offset + sizeof(void *); 377 else 378 p = object + s->inuse; 379 380 return p + alloc; 381 } 382 383 static void set_track(struct kmem_cache *s, void *object, 384 enum track_item alloc, unsigned long addr) 385 { 386 struct track *p = get_track(s, object, alloc); 387 388 if (addr) { 389 p->addr = addr; 390 p->cpu = smp_processor_id(); 391 p->pid = current->pid; 392 p->when = jiffies; 393 } else 394 memset(p, 0, sizeof(struct track)); 395 } 396 397 static void init_tracking(struct kmem_cache *s, void *object) 398 { 399 if (!(s->flags & SLAB_STORE_USER)) 400 return; 401 402 set_track(s, object, TRACK_FREE, 0UL); 403 set_track(s, object, TRACK_ALLOC, 0UL); 404 } 405 406 static void print_track(const char *s, struct track *t) 407 { 408 if (!t->addr) 409 return; 410 411 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 412 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); 413 } 414 415 static void print_tracking(struct kmem_cache *s, void *object) 416 { 417 if (!(s->flags & SLAB_STORE_USER)) 418 return; 419 420 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); 421 print_track("Freed", get_track(s, object, TRACK_FREE)); 422 } 423 424 static void print_page_info(struct page *page) 425 { 426 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", 427 page, page->objects, page->inuse, page->freelist, page->flags); 428 429 } 430 431 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 432 { 433 va_list args; 434 char buf[100]; 435 436 va_start(args, fmt); 437 vsnprintf(buf, sizeof(buf), fmt, args); 438 va_end(args); 439 printk(KERN_ERR "========================================" 440 "=====================================\n"); 441 printk(KERN_ERR "BUG %s: %s\n", s->name, buf); 442 printk(KERN_ERR "----------------------------------------" 443 "-------------------------------------\n\n"); 444 } 445 446 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 
447 { 448 va_list args; 449 char buf[100]; 450 451 va_start(args, fmt); 452 vsnprintf(buf, sizeof(buf), fmt, args); 453 va_end(args); 454 printk(KERN_ERR "FIX %s: %s\n", s->name, buf); 455 } 456 457 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 458 { 459 unsigned int off; /* Offset of last byte */ 460 u8 *addr = page_address(page); 461 462 print_tracking(s, p); 463 464 print_page_info(page); 465 466 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", 467 p, p - addr, get_freepointer(s, p)); 468 469 if (p > addr + 16) 470 print_section("Bytes b4", p - 16, 16); 471 472 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE)); 473 474 if (s->flags & SLAB_RED_ZONE) 475 print_section("Redzone", p + s->objsize, 476 s->inuse - s->objsize); 477 478 if (s->offset) 479 off = s->offset + sizeof(void *); 480 else 481 off = s->inuse; 482 483 if (s->flags & SLAB_STORE_USER) 484 off += 2 * sizeof(struct track); 485 486 if (off != s->size) 487 /* Beginning of the filler is the free pointer */ 488 print_section("Padding", p + off, s->size - off); 489 490 dump_stack(); 491 } 492 493 static void object_err(struct kmem_cache *s, struct page *page, 494 u8 *object, char *reason) 495 { 496 slab_bug(s, "%s", reason); 497 print_trailer(s, page, object); 498 } 499 500 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 501 { 502 va_list args; 503 char buf[100]; 504 505 va_start(args, fmt); 506 vsnprintf(buf, sizeof(buf), fmt, args); 507 va_end(args); 508 slab_bug(s, "%s", buf); 509 print_page_info(page); 510 dump_stack(); 511 } 512 513 static void init_object(struct kmem_cache *s, void *object, int active) 514 { 515 u8 *p = object; 516 517 if (s->flags & __OBJECT_POISON) { 518 memset(p, POISON_FREE, s->objsize - 1); 519 p[s->objsize - 1] = POISON_END; 520 } 521 522 if (s->flags & SLAB_RED_ZONE) 523 memset(p + s->objsize, 524 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, 525 s->inuse - s->objsize); 526 } 527 528 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) 529 { 530 while (bytes) { 531 if (*start != (u8)value) 532 return start; 533 start++; 534 bytes--; 535 } 536 return NULL; 537 } 538 539 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 540 void *from, void *to) 541 { 542 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); 543 memset(from, data, to - from); 544 } 545 546 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 547 u8 *object, char *what, 548 u8 *start, unsigned int value, unsigned int bytes) 549 { 550 u8 *fault; 551 u8 *end; 552 553 fault = check_bytes(start, value, bytes); 554 if (!fault) 555 return 1; 556 557 end = start + bytes; 558 while (end > fault && end[-1] == value) 559 end--; 560 561 slab_bug(s, "%s overwritten", what); 562 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n", 563 fault, end - 1, fault[0], value); 564 print_trailer(s, page, object); 565 566 restore_bytes(s, what, value, fault, end); 567 return 0; 568 } 569 570 /* 571 * Object layout: 572 * 573 * object address 574 * Bytes of the object to be managed. 575 * If the freepointer may overlay the object then the free 576 * pointer is the first word of the object. 577 * 578 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 579 * 0xa5 (POISON_END) 580 * 581 * object + s->objsize 582 * Padding to reach word boundary. This is also used for Redzoning. 583 * Padding is extended by another word if Redzoning is enabled and 584 * objsize == inuse. 
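 *
 * 	For example, on a 64 bit kernel an object of 20 bytes is padded
 * 	to 24 bytes and the 4 padding bytes hold the Redzone; if the
 * 	object were already 24 bytes an extra word would be appended so
 * 	that there is still room for Redzone bytes.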
585 * 586 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 587 * 0xcc (RED_ACTIVE) for objects in use. 588 * 589 * object + s->inuse 590 * Meta data starts here. 591 * 592 * A. Free pointer (if we cannot overwrite object on free) 593 * B. Tracking data for SLAB_STORE_USER 594 * C. Padding to reach required alignment boundary or at mininum 595 * one word if debugging is on to be able to detect writes 596 * before the word boundary. 597 * 598 * Padding is done using 0x5a (POISON_INUSE) 599 * 600 * object + s->size 601 * Nothing is used beyond s->size. 602 * 603 * If slabcaches are merged then the objsize and inuse boundaries are mostly 604 * ignored. And therefore no slab options that rely on these boundaries 605 * may be used with merged slabcaches. 606 */ 607 608 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 609 { 610 unsigned long off = s->inuse; /* The end of info */ 611 612 if (s->offset) 613 /* Freepointer is placed after the object. */ 614 off += sizeof(void *); 615 616 if (s->flags & SLAB_STORE_USER) 617 /* We also have user information there */ 618 off += 2 * sizeof(struct track); 619 620 if (s->size == off) 621 return 1; 622 623 return check_bytes_and_report(s, page, p, "Object padding", 624 p + off, POISON_INUSE, s->size - off); 625 } 626 627 /* Check the pad bytes at the end of a slab page */ 628 static int slab_pad_check(struct kmem_cache *s, struct page *page) 629 { 630 u8 *start; 631 u8 *fault; 632 u8 *end; 633 int length; 634 int remainder; 635 636 if (!(s->flags & SLAB_POISON)) 637 return 1; 638 639 start = page_address(page); 640 length = (PAGE_SIZE << compound_order(page)); 641 end = start + length; 642 remainder = length % s->size; 643 if (!remainder) 644 return 1; 645 646 fault = check_bytes(end - remainder, POISON_INUSE, remainder); 647 if (!fault) 648 return 1; 649 while (end > fault && end[-1] == POISON_INUSE) 650 end--; 651 652 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); 653 print_section("Padding", end - remainder, remainder); 654 655 restore_bytes(s, "slab padding", POISON_INUSE, start, end); 656 return 0; 657 } 658 659 static int check_object(struct kmem_cache *s, struct page *page, 660 void *object, int active) 661 { 662 u8 *p = object; 663 u8 *endobject = object + s->objsize; 664 665 if (s->flags & SLAB_RED_ZONE) { 666 unsigned int red = 667 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE; 668 669 if (!check_bytes_and_report(s, page, object, "Redzone", 670 endobject, red, s->inuse - s->objsize)) 671 return 0; 672 } else { 673 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { 674 check_bytes_and_report(s, page, p, "Alignment padding", 675 endobject, POISON_INUSE, s->inuse - s->objsize); 676 } 677 } 678 679 if (s->flags & SLAB_POISON) { 680 if (!active && (s->flags & __OBJECT_POISON) && 681 (!check_bytes_and_report(s, page, p, "Poison", p, 682 POISON_FREE, s->objsize - 1) || 683 !check_bytes_and_report(s, page, p, "Poison", 684 p + s->objsize - 1, POISON_END, 1))) 685 return 0; 686 /* 687 * check_pad_bytes cleans up on its own. 688 */ 689 check_pad_bytes(s, page, p); 690 } 691 692 if (!s->offset && active) 693 /* 694 * Object and freepointer overlap. Cannot check 695 * freepointer while object is allocated. 696 */ 697 return 1; 698 699 /* Check free pointer validity */ 700 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 701 object_err(s, page, p, "Freepointer corrupt"); 702 /* 703 * No choice but to zap it and thus lose the remainder 704 * of the free objects in this slab. 
May cause 705 * another error because the object count is now wrong. 706 */ 707 set_freepointer(s, p, NULL); 708 return 0; 709 } 710 return 1; 711 } 712 713 static int check_slab(struct kmem_cache *s, struct page *page) 714 { 715 int maxobj; 716 717 VM_BUG_ON(!irqs_disabled()); 718 719 if (!PageSlab(page)) { 720 slab_err(s, page, "Not a valid slab page"); 721 return 0; 722 } 723 724 maxobj = (PAGE_SIZE << compound_order(page)) / s->size; 725 if (page->objects > maxobj) { 726 slab_err(s, page, "objects %u > max %u", 727 s->name, page->objects, maxobj); 728 return 0; 729 } 730 if (page->inuse > page->objects) { 731 slab_err(s, page, "inuse %u > max %u", 732 s->name, page->inuse, page->objects); 733 return 0; 734 } 735 /* Slab_pad_check fixes things up after itself */ 736 slab_pad_check(s, page); 737 return 1; 738 } 739 740 /* 741 * Determine if a certain object on a page is on the freelist. Must hold the 742 * slab lock to guarantee that the chains are in a consistent state. 743 */ 744 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 745 { 746 int nr = 0; 747 void *fp = page->freelist; 748 void *object = NULL; 749 unsigned long max_objects; 750 751 while (fp && nr <= page->objects) { 752 if (fp == search) 753 return 1; 754 if (!check_valid_pointer(s, page, fp)) { 755 if (object) { 756 object_err(s, page, object, 757 "Freechain corrupt"); 758 set_freepointer(s, object, NULL); 759 break; 760 } else { 761 slab_err(s, page, "Freepointer corrupt"); 762 page->freelist = NULL; 763 page->inuse = page->objects; 764 slab_fix(s, "Freelist cleared"); 765 return 0; 766 } 767 break; 768 } 769 object = fp; 770 fp = get_freepointer(s, object); 771 nr++; 772 } 773 774 max_objects = (PAGE_SIZE << compound_order(page)) / s->size; 775 if (max_objects > MAX_OBJS_PER_PAGE) 776 max_objects = MAX_OBJS_PER_PAGE; 777 778 if (page->objects != max_objects) { 779 slab_err(s, page, "Wrong number of objects. Found %d but " 780 "should be %d", page->objects, max_objects); 781 page->objects = max_objects; 782 slab_fix(s, "Number of objects adjusted."); 783 } 784 if (page->inuse != page->objects - nr) { 785 slab_err(s, page, "Wrong object count. Counter is %d but " 786 "counted were %d", page->inuse, page->objects - nr); 787 page->inuse = page->objects - nr; 788 slab_fix(s, "Object count adjusted."); 789 } 790 return search == NULL; 791 } 792 793 static void trace(struct kmem_cache *s, struct page *page, void *object, 794 int alloc) 795 { 796 if (s->flags & SLAB_TRACE) { 797 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 798 s->name, 799 alloc ? "alloc" : "free", 800 object, page->inuse, 801 page->freelist); 802 803 if (!alloc) 804 print_section("Object", (void *)object, s->objsize); 805 806 dump_stack(); 807 } 808 } 809 810 /* 811 * Tracking of fully allocated slabs for debugging purposes. 
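 *
 * The full list is only populated when SLAB_STORE_USER is set:
 * unfreeze_slab() calls add_full() only in that case and remove_full()
 * returns early otherwise.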
812 */ 813 static void add_full(struct kmem_cache_node *n, struct page *page) 814 { 815 spin_lock(&n->list_lock); 816 list_add(&page->lru, &n->full); 817 spin_unlock(&n->list_lock); 818 } 819 820 static void remove_full(struct kmem_cache *s, struct page *page) 821 { 822 struct kmem_cache_node *n; 823 824 if (!(s->flags & SLAB_STORE_USER)) 825 return; 826 827 n = get_node(s, page_to_nid(page)); 828 829 spin_lock(&n->list_lock); 830 list_del(&page->lru); 831 spin_unlock(&n->list_lock); 832 } 833 834 /* Tracking of the number of slabs for debugging purposes */ 835 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 836 { 837 struct kmem_cache_node *n = get_node(s, node); 838 839 return atomic_long_read(&n->nr_slabs); 840 } 841 842 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 843 { 844 struct kmem_cache_node *n = get_node(s, node); 845 846 /* 847 * May be called early in order to allocate a slab for the 848 * kmem_cache_node structure. Solve the chicken-egg 849 * dilemma by deferring the increment of the count during 850 * bootstrap (see early_kmem_cache_node_alloc). 851 */ 852 if (!NUMA_BUILD || n) { 853 atomic_long_inc(&n->nr_slabs); 854 atomic_long_add(objects, &n->total_objects); 855 } 856 } 857 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 858 { 859 struct kmem_cache_node *n = get_node(s, node); 860 861 atomic_long_dec(&n->nr_slabs); 862 atomic_long_sub(objects, &n->total_objects); 863 } 864 865 /* Object debug checks for alloc/free paths */ 866 static void setup_object_debug(struct kmem_cache *s, struct page *page, 867 void *object) 868 { 869 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 870 return; 871 872 init_object(s, object, 0); 873 init_tracking(s, object); 874 } 875 876 static int alloc_debug_processing(struct kmem_cache *s, struct page *page, 877 void *object, unsigned long addr) 878 { 879 if (!check_slab(s, page)) 880 goto bad; 881 882 if (!on_freelist(s, page, object)) { 883 object_err(s, page, object, "Object already allocated"); 884 goto bad; 885 } 886 887 if (!check_valid_pointer(s, page, object)) { 888 object_err(s, page, object, "Freelist Pointer check fails"); 889 goto bad; 890 } 891 892 if (!check_object(s, page, object, 0)) 893 goto bad; 894 895 /* Success perform special debug activities for allocs */ 896 if (s->flags & SLAB_STORE_USER) 897 set_track(s, object, TRACK_ALLOC, addr); 898 trace(s, page, object, 1); 899 init_object(s, object, 1); 900 return 1; 901 902 bad: 903 if (PageSlab(page)) { 904 /* 905 * If this is a slab page then lets do the best we can 906 * to avoid issues in the future. Marking all objects 907 * as used avoids touching the remaining objects. 
908 */ 909 slab_fix(s, "Marking all objects used"); 910 page->inuse = page->objects; 911 page->freelist = NULL; 912 } 913 return 0; 914 } 915 916 static int free_debug_processing(struct kmem_cache *s, struct page *page, 917 void *object, unsigned long addr) 918 { 919 if (!check_slab(s, page)) 920 goto fail; 921 922 if (!check_valid_pointer(s, page, object)) { 923 slab_err(s, page, "Invalid object pointer 0x%p", object); 924 goto fail; 925 } 926 927 if (on_freelist(s, page, object)) { 928 object_err(s, page, object, "Object already free"); 929 goto fail; 930 } 931 932 if (!check_object(s, page, object, 1)) 933 return 0; 934 935 if (unlikely(s != page->slab)) { 936 if (!PageSlab(page)) { 937 slab_err(s, page, "Attempt to free object(0x%p) " 938 "outside of slab", object); 939 } else if (!page->slab) { 940 printk(KERN_ERR 941 "SLUB <none>: no slab for object 0x%p.\n", 942 object); 943 dump_stack(); 944 } else 945 object_err(s, page, object, 946 "page slab pointer corrupt."); 947 goto fail; 948 } 949 950 /* Special debug activities for freeing objects */ 951 if (!PageSlubFrozen(page) && !page->freelist) 952 remove_full(s, page); 953 if (s->flags & SLAB_STORE_USER) 954 set_track(s, object, TRACK_FREE, addr); 955 trace(s, page, object, 0); 956 init_object(s, object, 0); 957 return 1; 958 959 fail: 960 slab_fix(s, "Object at 0x%p not freed", object); 961 return 0; 962 } 963 964 static int __init setup_slub_debug(char *str) 965 { 966 slub_debug = DEBUG_DEFAULT_FLAGS; 967 if (*str++ != '=' || !*str) 968 /* 969 * No options specified. Switch on full debugging. 970 */ 971 goto out; 972 973 if (*str == ',') 974 /* 975 * No options but restriction on slabs. This means full 976 * debugging for slabs matching a pattern. 977 */ 978 goto check_slabs; 979 980 slub_debug = 0; 981 if (*str == '-') 982 /* 983 * Switch off all debugging measures. 984 */ 985 goto out; 986 987 /* 988 * Determine which debug features should be switched on 989 */ 990 for (; *str && *str != ','; str++) { 991 switch (tolower(*str)) { 992 case 'f': 993 slub_debug |= SLAB_DEBUG_FREE; 994 break; 995 case 'z': 996 slub_debug |= SLAB_RED_ZONE; 997 break; 998 case 'p': 999 slub_debug |= SLAB_POISON; 1000 break; 1001 case 'u': 1002 slub_debug |= SLAB_STORE_USER; 1003 break; 1004 case 't': 1005 slub_debug |= SLAB_TRACE; 1006 break; 1007 default: 1008 printk(KERN_ERR "slub_debug option '%c' " 1009 "unknown. skipped\n", *str); 1010 } 1011 } 1012 1013 check_slabs: 1014 if (*str == ',') 1015 slub_debug_slabs = str + 1; 1016 out: 1017 return 1; 1018 } 1019 1020 __setup("slub_debug", setup_slub_debug); 1021 1022 static unsigned long kmem_cache_flags(unsigned long objsize, 1023 unsigned long flags, const char *name, 1024 void (*ctor)(void *)) 1025 { 1026 /* 1027 * Enable debugging if selected on the kernel commandline. 
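	 *
	 * For instance, booting with
	 *
	 *	slub_debug=FZP,dentry
	 *
	 * turns on sanity checks (F), red zoning (Z) and poisoning (P) for
	 * all caches whose name starts with "dentry". See setup_slub_debug()
	 * above for the full set of option characters.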
1028 */ 1029 if (slub_debug && (!slub_debug_slabs || 1030 strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0)) 1031 flags |= slub_debug; 1032 1033 return flags; 1034 } 1035 #else 1036 static inline void setup_object_debug(struct kmem_cache *s, 1037 struct page *page, void *object) {} 1038 1039 static inline int alloc_debug_processing(struct kmem_cache *s, 1040 struct page *page, void *object, unsigned long addr) { return 0; } 1041 1042 static inline int free_debug_processing(struct kmem_cache *s, 1043 struct page *page, void *object, unsigned long addr) { return 0; } 1044 1045 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1046 { return 1; } 1047 static inline int check_object(struct kmem_cache *s, struct page *page, 1048 void *object, int active) { return 1; } 1049 static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 1050 static inline unsigned long kmem_cache_flags(unsigned long objsize, 1051 unsigned long flags, const char *name, 1052 void (*ctor)(void *)) 1053 { 1054 return flags; 1055 } 1056 #define slub_debug 0 1057 1058 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1059 { return 0; } 1060 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1061 int objects) {} 1062 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1063 int objects) {} 1064 #endif 1065 1066 /* 1067 * Slab allocation and freeing 1068 */ 1069 static inline struct page *alloc_slab_page(gfp_t flags, int node, 1070 struct kmem_cache_order_objects oo) 1071 { 1072 int order = oo_order(oo); 1073 1074 if (node == -1) 1075 return alloc_pages(flags, order); 1076 else 1077 return alloc_pages_node(node, flags, order); 1078 } 1079 1080 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1081 { 1082 struct page *page; 1083 struct kmem_cache_order_objects oo = s->oo; 1084 1085 flags |= s->allocflags; 1086 1087 page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node, 1088 oo); 1089 if (unlikely(!page)) { 1090 oo = s->min; 1091 /* 1092 * Allocation may have failed due to fragmentation. 1093 * Try a lower order alloc if possible 1094 */ 1095 page = alloc_slab_page(flags, node, oo); 1096 if (!page) 1097 return NULL; 1098 1099 stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK); 1100 } 1101 page->objects = oo_objects(oo); 1102 mod_zone_page_state(page_zone(page), 1103 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1104 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1105 1 << oo_order(oo)); 1106 1107 return page; 1108 } 1109 1110 static void setup_object(struct kmem_cache *s, struct page *page, 1111 void *object) 1112 { 1113 setup_object_debug(s, page, object); 1114 if (unlikely(s->ctor)) 1115 s->ctor(object); 1116 } 1117 1118 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1119 { 1120 struct page *page; 1121 void *start; 1122 void *last; 1123 void *p; 1124 1125 BUG_ON(flags & GFP_SLAB_BUG_MASK); 1126 1127 page = allocate_slab(s, 1128 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1129 if (!page) 1130 goto out; 1131 1132 inc_slabs_node(s, page_to_nid(page), page->objects); 1133 page->slab = s; 1134 page->flags |= 1 << PG_slab; 1135 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | 1136 SLAB_STORE_USER | SLAB_TRACE)) 1137 __SetPageSlubDebug(page); 1138 1139 start = page_address(page); 1140 1141 if (unlikely(s->flags & SLAB_POISON)) 1142 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); 1143 1144 last = start; 1145 for_each_object(p, s, start, page->objects) { 1146 setup_object(s, page, last); 1147 set_freepointer(s, last, p); 1148 last = p; 1149 } 1150 setup_object(s, page, last); 1151 set_freepointer(s, last, NULL); 1152 1153 page->freelist = start; 1154 page->inuse = 0; 1155 out: 1156 return page; 1157 } 1158 1159 static void __free_slab(struct kmem_cache *s, struct page *page) 1160 { 1161 int order = compound_order(page); 1162 int pages = 1 << order; 1163 1164 if (unlikely(SLABDEBUG && PageSlubDebug(page))) { 1165 void *p; 1166 1167 slab_pad_check(s, page); 1168 for_each_object(p, s, page_address(page), 1169 page->objects) 1170 check_object(s, page, p, 0); 1171 __ClearPageSlubDebug(page); 1172 } 1173 1174 mod_zone_page_state(page_zone(page), 1175 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1176 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1177 -pages); 1178 1179 __ClearPageSlab(page); 1180 reset_page_mapcount(page); 1181 if (current->reclaim_state) 1182 current->reclaim_state->reclaimed_slab += pages; 1183 __free_pages(page, order); 1184 } 1185 1186 static void rcu_free_slab(struct rcu_head *h) 1187 { 1188 struct page *page; 1189 1190 page = container_of((struct list_head *)h, struct page, lru); 1191 __free_slab(page->slab, page); 1192 } 1193 1194 static void free_slab(struct kmem_cache *s, struct page *page) 1195 { 1196 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1197 /* 1198 * RCU free overloads the RCU head over the LRU 1199 */ 1200 struct rcu_head *head = (void *)&page->lru; 1201 1202 call_rcu(head, rcu_free_slab); 1203 } else 1204 __free_slab(s, page); 1205 } 1206 1207 static void discard_slab(struct kmem_cache *s, struct page *page) 1208 { 1209 dec_slabs_node(s, page_to_nid(page), page->objects); 1210 free_slab(s, page); 1211 } 1212 1213 /* 1214 * Per slab locking using the pagelock 1215 */ 1216 static __always_inline void slab_lock(struct page *page) 1217 { 1218 bit_spin_lock(PG_locked, &page->flags); 1219 } 1220 1221 static __always_inline void slab_unlock(struct page *page) 1222 { 1223 __bit_spin_unlock(PG_locked, &page->flags); 1224 } 1225 1226 static __always_inline int slab_trylock(struct page *page) 1227 { 1228 int rc = 1; 1229 1230 rc = bit_spin_trylock(PG_locked, &page->flags); 1231 return rc; 1232 } 1233 1234 /* 1235 * Management of partially allocated slabs 1236 */ 1237 static void add_partial(struct kmem_cache_node *n, 1238 struct page *page, int tail) 1239 { 1240 spin_lock(&n->list_lock); 1241 n->nr_partial++; 1242 if (tail) 1243 list_add_tail(&page->lru, &n->partial); 1244 else 1245 list_add(&page->lru, &n->partial); 1246 spin_unlock(&n->list_lock); 1247 } 1248 1249 static void remove_partial(struct kmem_cache *s, struct page *page) 1250 { 1251 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1252 1253 spin_lock(&n->list_lock); 1254 list_del(&page->lru); 1255 n->nr_partial--; 1256 spin_unlock(&n->list_lock); 1257 } 1258 1259 /* 1260 * Lock slab and remove from the partial list. 1261 * 1262 * Must hold list_lock. 1263 */ 1264 static inline int lock_and_freeze_slab(struct kmem_cache_node *n, 1265 struct page *page) 1266 { 1267 if (slab_trylock(page)) { 1268 list_del(&page->lru); 1269 n->nr_partial--; 1270 __SetPageSlubFrozen(page); 1271 return 1; 1272 } 1273 return 0; 1274 } 1275 1276 /* 1277 * Try to allocate a partial slab from a specific node. 1278 */ 1279 static struct page *get_partial_node(struct kmem_cache_node *n) 1280 { 1281 struct page *page; 1282 1283 /* 1284 * Racy check. If we mistakenly see no partial slabs then we 1285 * just allocate an empty slab. If we mistakenly try to get a 1286 * partial slab and there is none available then get_partials() 1287 * will return NULL. 1288 */ 1289 if (!n || !n->nr_partial) 1290 return NULL; 1291 1292 spin_lock(&n->list_lock); 1293 list_for_each_entry(page, &n->partial, lru) 1294 if (lock_and_freeze_slab(n, page)) 1295 goto out; 1296 page = NULL; 1297 out: 1298 spin_unlock(&n->list_lock); 1299 return page; 1300 } 1301 1302 /* 1303 * Get a page from somewhere. Search in increasing NUMA distances. 
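 *
 * Remote nodes are only considered at all when
 *
 *	get_cycles() % 1024 <= s->remote_node_defrag_ratio
 *
 * so the ratio (0-1000, see the comment in the function body) roughly
 * controls how often we go looking for off node partial slabs.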
1304 */ 1305 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) 1306 { 1307 #ifdef CONFIG_NUMA 1308 struct zonelist *zonelist; 1309 struct zoneref *z; 1310 struct zone *zone; 1311 enum zone_type high_zoneidx = gfp_zone(flags); 1312 struct page *page; 1313 1314 /* 1315 * The defrag ratio allows a configuration of the tradeoffs between 1316 * inter node defragmentation and node local allocations. A lower 1317 * defrag_ratio increases the tendency to do local allocations 1318 * instead of attempting to obtain partial slabs from other nodes. 1319 * 1320 * If the defrag_ratio is set to 0 then kmalloc() always 1321 * returns node local objects. If the ratio is higher then kmalloc() 1322 * may return off node objects because partial slabs are obtained 1323 * from other nodes and filled up. 1324 * 1325 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes 1326 * defrag_ratio = 1000) then every (well almost) allocation will 1327 * first attempt to defrag slab caches on other nodes. This means 1328 * scanning over all nodes to look for partial slabs which may be 1329 * expensive if we do it every time we are trying to find a slab 1330 * with available objects. 1331 */ 1332 if (!s->remote_node_defrag_ratio || 1333 get_cycles() % 1024 > s->remote_node_defrag_ratio) 1334 return NULL; 1335 1336 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 1337 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1338 struct kmem_cache_node *n; 1339 1340 n = get_node(s, zone_to_nid(zone)); 1341 1342 if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1343 n->nr_partial > s->min_partial) { 1344 page = get_partial_node(n); 1345 if (page) 1346 return page; 1347 } 1348 } 1349 #endif 1350 return NULL; 1351 } 1352 1353 /* 1354 * Get a partial page, lock it and return it. 1355 */ 1356 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) 1357 { 1358 struct page *page; 1359 int searchnode = (node == -1) ? numa_node_id() : node; 1360 1361 page = get_partial_node(get_node(s, searchnode)); 1362 if (page || (flags & __GFP_THISNODE)) 1363 return page; 1364 1365 return get_any_partial(s, flags); 1366 } 1367 1368 /* 1369 * Move a page back to the lists. 1370 * 1371 * Must be called with the slab lock held. 1372 * 1373 * On exit the slab lock will have been dropped. 1374 */ 1375 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) 1376 { 1377 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1378 struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id()); 1379 1380 __ClearPageSlubFrozen(page); 1381 if (page->inuse) { 1382 1383 if (page->freelist) { 1384 add_partial(n, page, tail); 1385 stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); 1386 } else { 1387 stat(c, DEACTIVATE_FULL); 1388 if (SLABDEBUG && PageSlubDebug(page) && 1389 (s->flags & SLAB_STORE_USER)) 1390 add_full(n, page); 1391 } 1392 slab_unlock(page); 1393 } else { 1394 stat(c, DEACTIVATE_EMPTY); 1395 if (n->nr_partial < s->min_partial) { 1396 /* 1397 * Adding an empty slab to the partial slabs in order 1398 * to avoid page allocator overhead. This slab needs 1399 * to come after the other slabs with objects in 1400 * so that the others get filled first. That way the 1401 * size of the partial list stays small. 1402 * 1403 * kmem_cache_shrink can reclaim any empty slabs from 1404 * the partial list. 
1405 */ 1406 add_partial(n, page, 1); 1407 slab_unlock(page); 1408 } else { 1409 slab_unlock(page); 1410 stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB); 1411 discard_slab(s, page); 1412 } 1413 } 1414 } 1415 1416 /* 1417 * Remove the cpu slab 1418 */ 1419 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1420 { 1421 struct page *page = c->page; 1422 int tail = 1; 1423 1424 if (page->freelist) 1425 stat(c, DEACTIVATE_REMOTE_FREES); 1426 /* 1427 * Merge cpu freelist into slab freelist. Typically we get here 1428 * because both freelists are empty. So this is unlikely 1429 * to occur. 1430 */ 1431 while (unlikely(c->freelist)) { 1432 void **object; 1433 1434 tail = 0; /* Hot objects. Put the slab first */ 1435 1436 /* Retrieve object from cpu_freelist */ 1437 object = c->freelist; 1438 c->freelist = c->freelist[c->offset]; 1439 1440 /* And put onto the regular freelist */ 1441 object[c->offset] = page->freelist; 1442 page->freelist = object; 1443 page->inuse--; 1444 } 1445 c->page = NULL; 1446 unfreeze_slab(s, page, tail); 1447 } 1448 1449 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1450 { 1451 stat(c, CPUSLAB_FLUSH); 1452 slab_lock(c->page); 1453 deactivate_slab(s, c); 1454 } 1455 1456 /* 1457 * Flush cpu slab. 1458 * 1459 * Called from IPI handler with interrupts disabled. 1460 */ 1461 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 1462 { 1463 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 1464 1465 if (likely(c && c->page)) 1466 flush_slab(s, c); 1467 } 1468 1469 static void flush_cpu_slab(void *d) 1470 { 1471 struct kmem_cache *s = d; 1472 1473 __flush_cpu_slab(s, smp_processor_id()); 1474 } 1475 1476 static void flush_all(struct kmem_cache *s) 1477 { 1478 on_each_cpu(flush_cpu_slab, s, 1); 1479 } 1480 1481 /* 1482 * Check if the objects in a per cpu structure fit numa 1483 * locality expectations. 1484 */ 1485 static inline int node_match(struct kmem_cache_cpu *c, int node) 1486 { 1487 #ifdef CONFIG_NUMA 1488 if (node != -1 && c->node != node) 1489 return 0; 1490 #endif 1491 return 1; 1492 } 1493 1494 /* 1495 * Slow path. The lockless freelist is empty or we need to perform 1496 * debugging duties. 1497 * 1498 * Interrupts are disabled. 1499 * 1500 * Processing is still very fast if new objects have been freed to the 1501 * regular freelist. In that case we simply take over the regular freelist 1502 * as the lockless freelist and zap the regular freelist. 1503 * 1504 * If that is not working then we fall back to the partial lists. We take the 1505 * first element of the freelist as the object to allocate now and move the 1506 * rest of the freelist to the lockless freelist. 1507 * 1508 * And if we were unable to get a new slab from the partial slab lists then 1509 * we need to allocate a new slab. This is the slowest path since it involves 1510 * a call to the page allocator and the setup of a new slab. 
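 *
 * A rough sketch of the freelist takeover done below (c is the per cpu
 * structure of this processor):
 *
 *	object            = c->page->freelist;
 *	c->freelist       = object[c->offset];	(remaining free objects)
 *	c->page->inuse    = c->page->objects;
 *	c->page->freelist = NULL;
 *
 * The first object is returned, the rest of the regular freelist becomes
 * the lockless freelist and the page is accounted as fully in use until
 * the cpu slab is deactivated again.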
1511 */ 1512 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 1513 unsigned long addr, struct kmem_cache_cpu *c) 1514 { 1515 void **object; 1516 struct page *new; 1517 1518 /* We handle __GFP_ZERO in the caller */ 1519 gfpflags &= ~__GFP_ZERO; 1520 1521 if (!c->page) 1522 goto new_slab; 1523 1524 slab_lock(c->page); 1525 if (unlikely(!node_match(c, node))) 1526 goto another_slab; 1527 1528 stat(c, ALLOC_REFILL); 1529 1530 load_freelist: 1531 object = c->page->freelist; 1532 if (unlikely(!object)) 1533 goto another_slab; 1534 if (unlikely(SLABDEBUG && PageSlubDebug(c->page))) 1535 goto debug; 1536 1537 c->freelist = object[c->offset]; 1538 c->page->inuse = c->page->objects; 1539 c->page->freelist = NULL; 1540 c->node = page_to_nid(c->page); 1541 unlock_out: 1542 slab_unlock(c->page); 1543 stat(c, ALLOC_SLOWPATH); 1544 return object; 1545 1546 another_slab: 1547 deactivate_slab(s, c); 1548 1549 new_slab: 1550 new = get_partial(s, gfpflags, node); 1551 if (new) { 1552 c->page = new; 1553 stat(c, ALLOC_FROM_PARTIAL); 1554 goto load_freelist; 1555 } 1556 1557 if (gfpflags & __GFP_WAIT) 1558 local_irq_enable(); 1559 1560 new = new_slab(s, gfpflags, node); 1561 1562 if (gfpflags & __GFP_WAIT) 1563 local_irq_disable(); 1564 1565 if (new) { 1566 c = get_cpu_slab(s, smp_processor_id()); 1567 stat(c, ALLOC_SLAB); 1568 if (c->page) 1569 flush_slab(s, c); 1570 slab_lock(new); 1571 __SetPageSlubFrozen(new); 1572 c->page = new; 1573 goto load_freelist; 1574 } 1575 return NULL; 1576 debug: 1577 if (!alloc_debug_processing(s, c->page, object, addr)) 1578 goto another_slab; 1579 1580 c->page->inuse++; 1581 c->page->freelist = object[c->offset]; 1582 c->node = -1; 1583 goto unlock_out; 1584 } 1585 1586 /* 1587 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 1588 * have the fastpath folded into their functions. So no function call 1589 * overhead for requests that can be satisfied on the fastpath. 1590 * 1591 * The fastpath works by first checking if the lockless freelist can be used. 1592 * If not then __slab_alloc is called for slow processing. 1593 * 1594 * Otherwise we can simply pick the next object from the lockless free list. 
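 *
 * With interrupts disabled and c being this cpu's kmem_cache_cpu the
 * fastpath boils down to:
 *
 *	object      = c->freelist;
 *	c->freelist = object[c->offset];
 *
 * No locks or atomic operations are needed since the lockless freelist
 * is strictly per cpu and cannot be touched by other contexts while
 * interrupts are off.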
1595 */ 1596 static __always_inline void *slab_alloc(struct kmem_cache *s, 1597 gfp_t gfpflags, int node, unsigned long addr) 1598 { 1599 void **object; 1600 struct kmem_cache_cpu *c; 1601 unsigned long flags; 1602 unsigned int objsize; 1603 1604 gfpflags &= slab_gfp_mask; 1605 1606 lockdep_trace_alloc(gfpflags); 1607 might_sleep_if(gfpflags & __GFP_WAIT); 1608 1609 if (should_failslab(s->objsize, gfpflags)) 1610 return NULL; 1611 1612 local_irq_save(flags); 1613 c = get_cpu_slab(s, smp_processor_id()); 1614 objsize = c->objsize; 1615 if (unlikely(!c->freelist || !node_match(c, node))) 1616 1617 object = __slab_alloc(s, gfpflags, node, addr, c); 1618 1619 else { 1620 object = c->freelist; 1621 c->freelist = object[c->offset]; 1622 stat(c, ALLOC_FASTPATH); 1623 } 1624 local_irq_restore(flags); 1625 1626 if (unlikely((gfpflags & __GFP_ZERO) && object)) 1627 memset(object, 0, objsize); 1628 1629 kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags); 1630 return object; 1631 } 1632 1633 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 1634 { 1635 void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_); 1636 1637 trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags); 1638 1639 return ret; 1640 } 1641 EXPORT_SYMBOL(kmem_cache_alloc); 1642 1643 #ifdef CONFIG_KMEMTRACE 1644 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) 1645 { 1646 return slab_alloc(s, gfpflags, -1, _RET_IP_); 1647 } 1648 EXPORT_SYMBOL(kmem_cache_alloc_notrace); 1649 #endif 1650 1651 #ifdef CONFIG_NUMA 1652 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 1653 { 1654 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); 1655 1656 trace_kmem_cache_alloc_node(_RET_IP_, ret, 1657 s->objsize, s->size, gfpflags, node); 1658 1659 return ret; 1660 } 1661 EXPORT_SYMBOL(kmem_cache_alloc_node); 1662 #endif 1663 1664 #ifdef CONFIG_KMEMTRACE 1665 void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, 1666 gfp_t gfpflags, 1667 int node) 1668 { 1669 return slab_alloc(s, gfpflags, node, _RET_IP_); 1670 } 1671 EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); 1672 #endif 1673 1674 /* 1675 * Slow patch handling. This may still be called frequently since objects 1676 * have a longer lifetime than the cpu slabs in most processing loads. 1677 * 1678 * So we still attempt to reduce cache line usage. Just take the slab 1679 * lock and free the item. If there is no additional partial page 1680 * handling required then we can return immediately. 1681 */ 1682 static void __slab_free(struct kmem_cache *s, struct page *page, 1683 void *x, unsigned long addr, unsigned int offset) 1684 { 1685 void *prior; 1686 void **object = (void *)x; 1687 struct kmem_cache_cpu *c; 1688 1689 c = get_cpu_slab(s, raw_smp_processor_id()); 1690 stat(c, FREE_SLOWPATH); 1691 slab_lock(page); 1692 1693 if (unlikely(SLABDEBUG && PageSlubDebug(page))) 1694 goto debug; 1695 1696 checks_ok: 1697 prior = object[offset] = page->freelist; 1698 page->freelist = object; 1699 page->inuse--; 1700 1701 if (unlikely(PageSlubFrozen(page))) { 1702 stat(c, FREE_FROZEN); 1703 goto out_unlock; 1704 } 1705 1706 if (unlikely(!page->inuse)) 1707 goto slab_empty; 1708 1709 /* 1710 * Objects left in the slab. If it was not on the partial list before 1711 * then add it. 
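	 *
	 * (prior is the freelist as it was before this free; NULL means the
	 * slab was fully allocated and hence not on the partial list.)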
1712 */ 1713 if (unlikely(!prior)) { 1714 add_partial(get_node(s, page_to_nid(page)), page, 1); 1715 stat(c, FREE_ADD_PARTIAL); 1716 } 1717 1718 out_unlock: 1719 slab_unlock(page); 1720 return; 1721 1722 slab_empty: 1723 if (prior) { 1724 /* 1725 * Slab still on the partial list. 1726 */ 1727 remove_partial(s, page); 1728 stat(c, FREE_REMOVE_PARTIAL); 1729 } 1730 slab_unlock(page); 1731 stat(c, FREE_SLAB); 1732 discard_slab(s, page); 1733 return; 1734 1735 debug: 1736 if (!free_debug_processing(s, page, x, addr)) 1737 goto out_unlock; 1738 goto checks_ok; 1739 } 1740 1741 /* 1742 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 1743 * can perform fastpath freeing without additional function calls. 1744 * 1745 * The fastpath is only possible if we are freeing to the current cpu slab 1746 * of this processor. This typically the case if we have just allocated 1747 * the item before. 1748 * 1749 * If fastpath is not possible then fall back to __slab_free where we deal 1750 * with all sorts of special processing. 1751 */ 1752 static __always_inline void slab_free(struct kmem_cache *s, 1753 struct page *page, void *x, unsigned long addr) 1754 { 1755 void **object = (void *)x; 1756 struct kmem_cache_cpu *c; 1757 unsigned long flags; 1758 1759 kmemleak_free_recursive(x, s->flags); 1760 local_irq_save(flags); 1761 c = get_cpu_slab(s, smp_processor_id()); 1762 debug_check_no_locks_freed(object, c->objsize); 1763 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1764 debug_check_no_obj_freed(object, c->objsize); 1765 if (likely(page == c->page && c->node >= 0)) { 1766 object[c->offset] = c->freelist; 1767 c->freelist = object; 1768 stat(c, FREE_FASTPATH); 1769 } else 1770 __slab_free(s, page, x, addr, c->offset); 1771 1772 local_irq_restore(flags); 1773 } 1774 1775 void kmem_cache_free(struct kmem_cache *s, void *x) 1776 { 1777 struct page *page; 1778 1779 page = virt_to_head_page(x); 1780 1781 slab_free(s, page, x, _RET_IP_); 1782 1783 trace_kmem_cache_free(_RET_IP_, x); 1784 } 1785 EXPORT_SYMBOL(kmem_cache_free); 1786 1787 /* Figure out on which slab page the object resides */ 1788 static struct page *get_object_page(const void *x) 1789 { 1790 struct page *page = virt_to_head_page(x); 1791 1792 if (!PageSlab(page)) 1793 return NULL; 1794 1795 return page; 1796 } 1797 1798 /* 1799 * Object placement in a slab is made very easy because we always start at 1800 * offset 0. If we tune the size of the object to the alignment then we can 1801 * get the required alignment by putting one properly sized object after 1802 * another. 1803 * 1804 * Notice that the allocation order determines the sizes of the per cpu 1805 * caches. Each processor has always one slab available for allocations. 1806 * Increasing the allocation order reduces the number of times that slabs 1807 * must be moved on and off the partial lists and is therefore a factor in 1808 * locking overhead. 1809 */ 1810 1811 /* 1812 * Mininum / Maximum order of slab pages. This influences locking overhead 1813 * and slab fragmentation. A higher order reduces the number of partial slabs 1814 * and increases the number of allocations possible without having to 1815 * take the list_lock. 1816 */ 1817 static int slub_min_order; 1818 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER; 1819 static int slub_min_objects; 1820 1821 /* 1822 * Merge control. If this is set then no merging of slab caches will occur. 1823 * (Could be removed. This was introduced to pacify the merge skeptics.) 
1824 */ 1825 static int slub_nomerge; 1826 1827 /* 1828 * Calculate the order of allocation given an slab object size. 1829 * 1830 * The order of allocation has significant impact on performance and other 1831 * system components. Generally order 0 allocations should be preferred since 1832 * order 0 does not cause fragmentation in the page allocator. Larger objects 1833 * be problematic to put into order 0 slabs because there may be too much 1834 * unused space left. We go to a higher order if more than 1/16th of the slab 1835 * would be wasted. 1836 * 1837 * In order to reach satisfactory performance we must ensure that a minimum 1838 * number of objects is in one slab. Otherwise we may generate too much 1839 * activity on the partial lists which requires taking the list_lock. This is 1840 * less a concern for large slabs though which are rarely used. 1841 * 1842 * slub_max_order specifies the order where we begin to stop considering the 1843 * number of objects in a slab as critical. If we reach slub_max_order then 1844 * we try to keep the page order as low as possible. So we accept more waste 1845 * of space in favor of a small page order. 1846 * 1847 * Higher order allocations also allow the placement of more objects in a 1848 * slab and thereby reduce object handling overhead. If the user has 1849 * requested a higher mininum order then we start with that one instead of 1850 * the smallest order which will fit the object. 1851 */ 1852 static inline int slab_order(int size, int min_objects, 1853 int max_order, int fract_leftover) 1854 { 1855 int order; 1856 int rem; 1857 int min_order = slub_min_order; 1858 1859 if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE) 1860 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 1861 1862 for (order = max(min_order, 1863 fls(min_objects * size - 1) - PAGE_SHIFT); 1864 order <= max_order; order++) { 1865 1866 unsigned long slab_size = PAGE_SIZE << order; 1867 1868 if (slab_size < min_objects * size) 1869 continue; 1870 1871 rem = slab_size % size; 1872 1873 if (rem <= slab_size / fract_leftover) 1874 break; 1875 1876 } 1877 1878 return order; 1879 } 1880 1881 static inline int calculate_order(int size) 1882 { 1883 int order; 1884 int min_objects; 1885 int fraction; 1886 int max_objects; 1887 1888 /* 1889 * Attempt to find best configuration for a slab. This 1890 * works by first attempting to generate a layout with 1891 * the best configuration and backing off gradually. 1892 * 1893 * First we reduce the acceptable waste in a slab. Then 1894 * we reduce the minimum objects required in a slab. 1895 */ 1896 min_objects = slub_min_objects; 1897 if (!min_objects) 1898 min_objects = 4 * (fls(nr_cpu_ids) + 1); 1899 max_objects = (PAGE_SIZE << slub_max_order)/size; 1900 min_objects = min(min_objects, max_objects); 1901 1902 while (min_objects > 1) { 1903 fraction = 16; 1904 while (fraction >= 4) { 1905 order = slab_order(size, min_objects, 1906 slub_max_order, fraction); 1907 if (order <= slub_max_order) 1908 return order; 1909 fraction /= 2; 1910 } 1911 min_objects --; 1912 } 1913 1914 /* 1915 * We were unable to place multiple objects in a slab. Now 1916 * lets see if we can place a single object there. 1917 */ 1918 order = slab_order(size, 1, slub_max_order, 1); 1919 if (order <= slub_max_order) 1920 return order; 1921 1922 /* 1923 * Doh this slab cannot be placed using slub_max_order. 
1924 */ 1925 order = slab_order(size, 1, MAX_ORDER, 1); 1926 if (order < MAX_ORDER) 1927 return order; 1928 return -ENOSYS; 1929 } 1930 1931 /* 1932 * Figure out what the alignment of the objects will be. 1933 */ 1934 static unsigned long calculate_alignment(unsigned long flags, 1935 unsigned long align, unsigned long size) 1936 { 1937 /* 1938 * If the user wants hardware cache aligned objects then follow that 1939 * suggestion if the object is sufficiently large. 1940 * 1941 * The hardware cache alignment cannot override the specified 1942 * alignment though. If that is greater then use it. 1943 */ 1944 if (flags & SLAB_HWCACHE_ALIGN) { 1945 unsigned long ralign = cache_line_size(); 1946 while (size <= ralign / 2) 1947 ralign /= 2; 1948 align = max(align, ralign); 1949 } 1950 1951 if (align < ARCH_SLAB_MINALIGN) 1952 align = ARCH_SLAB_MINALIGN; 1953 1954 return ALIGN(align, sizeof(void *)); 1955 } 1956 1957 static void init_kmem_cache_cpu(struct kmem_cache *s, 1958 struct kmem_cache_cpu *c) 1959 { 1960 c->page = NULL; 1961 c->freelist = NULL; 1962 c->node = 0; 1963 c->offset = s->offset / sizeof(void *); 1964 c->objsize = s->objsize; 1965 #ifdef CONFIG_SLUB_STATS 1966 memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned)); 1967 #endif 1968 } 1969 1970 static void 1971 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) 1972 { 1973 n->nr_partial = 0; 1974 spin_lock_init(&n->list_lock); 1975 INIT_LIST_HEAD(&n->partial); 1976 #ifdef CONFIG_SLUB_DEBUG 1977 atomic_long_set(&n->nr_slabs, 0); 1978 atomic_long_set(&n->total_objects, 0); 1979 INIT_LIST_HEAD(&n->full); 1980 #endif 1981 } 1982 1983 #ifdef CONFIG_SMP 1984 /* 1985 * Per cpu array for per cpu structures. 1986 * 1987 * The per cpu array places all kmem_cache_cpu structures from one processor 1988 * close together meaning that it becomes possible that multiple per cpu 1989 * structures are contained in one cacheline. This may be particularly 1990 * beneficial for the kmalloc caches. 1991 * 1992 * A desktop system typically has around 60-80 slabs. With 100 here we are 1993 * likely able to get per cpu structures for all caches from the array defined 1994 * here. We must be able to cover all kmalloc caches during bootstrap. 1995 * 1996 * If the per cpu array is exhausted then fall back to kmalloc 1997 * of individual cachelines. No sharing is possible then. 
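 *
 * Unused entries of the array are kept on a simple per cpu free list
 * chained through the freelist field of the kmem_cache_cpu structures:
 *
 *	c = per_cpu(kmem_cache_cpu_free, cpu);		(pop an entry)
 *	per_cpu(kmem_cache_cpu_free, cpu) = (void *)c->freelist;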
1998 */ 1999 #define NR_KMEM_CACHE_CPU 100 2000 2001 static DEFINE_PER_CPU(struct kmem_cache_cpu, 2002 kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; 2003 2004 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); 2005 static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS); 2006 2007 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, 2008 int cpu, gfp_t flags) 2009 { 2010 struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu); 2011 2012 if (c) 2013 per_cpu(kmem_cache_cpu_free, cpu) = 2014 (void *)c->freelist; 2015 else { 2016 /* Table overflow: So allocate ourselves */ 2017 c = kmalloc_node( 2018 ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()), 2019 flags, cpu_to_node(cpu)); 2020 if (!c) 2021 return NULL; 2022 } 2023 2024 init_kmem_cache_cpu(s, c); 2025 return c; 2026 } 2027 2028 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu) 2029 { 2030 if (c < per_cpu(kmem_cache_cpu, cpu) || 2031 c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { 2032 kfree(c); 2033 return; 2034 } 2035 c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu); 2036 per_cpu(kmem_cache_cpu_free, cpu) = c; 2037 } 2038 2039 static void free_kmem_cache_cpus(struct kmem_cache *s) 2040 { 2041 int cpu; 2042 2043 for_each_online_cpu(cpu) { 2044 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 2045 2046 if (c) { 2047 s->cpu_slab[cpu] = NULL; 2048 free_kmem_cache_cpu(c, cpu); 2049 } 2050 } 2051 } 2052 2053 static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) 2054 { 2055 int cpu; 2056 2057 for_each_online_cpu(cpu) { 2058 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 2059 2060 if (c) 2061 continue; 2062 2063 c = alloc_kmem_cache_cpu(s, cpu, flags); 2064 if (!c) { 2065 free_kmem_cache_cpus(s); 2066 return 0; 2067 } 2068 s->cpu_slab[cpu] = c; 2069 } 2070 return 1; 2071 } 2072 2073 /* 2074 * Initialize the per cpu array. 2075 */ 2076 static void init_alloc_cpu_cpu(int cpu) 2077 { 2078 int i; 2079 2080 if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once))) 2081 return; 2082 2083 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) 2084 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); 2085 2086 cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)); 2087 } 2088 2089 static void __init init_alloc_cpu(void) 2090 { 2091 int cpu; 2092 2093 for_each_online_cpu(cpu) 2094 init_alloc_cpu_cpu(cpu); 2095 } 2096 2097 #else 2098 static inline void free_kmem_cache_cpus(struct kmem_cache *s) {} 2099 static inline void init_alloc_cpu(void) {} 2100 2101 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) 2102 { 2103 init_kmem_cache_cpu(s, &s->cpu_slab); 2104 return 1; 2105 } 2106 #endif 2107 2108 #ifdef CONFIG_NUMA 2109 /* 2110 * No kmalloc_node yet so do it by hand. We know that this is the first 2111 * slab on the node for this slabcache. There are no concurrent accesses 2112 * possible. 2113 * 2114 * Note that this function only works on the kmalloc_node_cache 2115 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2116 * memory on a fresh node that has no slab structures yet. 
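 *
 * In short: the first slab allocated for the kmem_cache_node cache
 * (kmalloc_caches[0]) on a fresh node is carved up by hand below, and
 * its first object becomes the kmem_cache_node structure for that
 * very node.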
2117 */ 2118 static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node) 2119 { 2120 struct page *page; 2121 struct kmem_cache_node *n; 2122 unsigned long flags; 2123 2124 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); 2125 2126 page = new_slab(kmalloc_caches, gfpflags, node); 2127 2128 BUG_ON(!page); 2129 if (page_to_nid(page) != node) { 2130 printk(KERN_ERR "SLUB: Unable to allocate memory from " 2131 "node %d\n", node); 2132 printk(KERN_ERR "SLUB: Allocating a useless per node structure " 2133 "in order to be able to continue\n"); 2134 } 2135 2136 n = page->freelist; 2137 BUG_ON(!n); 2138 page->freelist = get_freepointer(kmalloc_caches, n); 2139 page->inuse++; 2140 kmalloc_caches->node[node] = n; 2141 #ifdef CONFIG_SLUB_DEBUG 2142 init_object(kmalloc_caches, n, 1); 2143 init_tracking(kmalloc_caches, n); 2144 #endif 2145 init_kmem_cache_node(n, kmalloc_caches); 2146 inc_slabs_node(kmalloc_caches, node, page->objects); 2147 2148 /* 2149 * lockdep requires consistent irq usage for each lock 2150 * so even though there cannot be a race this early in 2151 * the boot sequence, we still disable irqs. 2152 */ 2153 local_irq_save(flags); 2154 add_partial(n, page, 0); 2155 local_irq_restore(flags); 2156 } 2157 2158 static void free_kmem_cache_nodes(struct kmem_cache *s) 2159 { 2160 int node; 2161 2162 for_each_node_state(node, N_NORMAL_MEMORY) { 2163 struct kmem_cache_node *n = s->node[node]; 2164 if (n && n != &s->local_node) 2165 kmem_cache_free(kmalloc_caches, n); 2166 s->node[node] = NULL; 2167 } 2168 } 2169 2170 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2171 { 2172 int node; 2173 int local_node; 2174 2175 if (slab_state >= UP) 2176 local_node = page_to_nid(virt_to_page(s)); 2177 else 2178 local_node = 0; 2179 2180 for_each_node_state(node, N_NORMAL_MEMORY) { 2181 struct kmem_cache_node *n; 2182 2183 if (local_node == node) 2184 n = &s->local_node; 2185 else { 2186 if (slab_state == DOWN) { 2187 early_kmem_cache_node_alloc(gfpflags, node); 2188 continue; 2189 } 2190 n = kmem_cache_alloc_node(kmalloc_caches, 2191 gfpflags, node); 2192 2193 if (!n) { 2194 free_kmem_cache_nodes(s); 2195 return 0; 2196 } 2197 2198 } 2199 s->node[node] = n; 2200 init_kmem_cache_node(n, s); 2201 } 2202 return 1; 2203 } 2204 #else 2205 static void free_kmem_cache_nodes(struct kmem_cache *s) 2206 { 2207 } 2208 2209 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2210 { 2211 init_kmem_cache_node(&s->local_node, s); 2212 return 1; 2213 } 2214 #endif 2215 2216 static void set_min_partial(struct kmem_cache *s, unsigned long min) 2217 { 2218 if (min < MIN_PARTIAL) 2219 min = MIN_PARTIAL; 2220 else if (min > MAX_PARTIAL) 2221 min = MAX_PARTIAL; 2222 s->min_partial = min; 2223 } 2224 2225 /* 2226 * calculate_sizes() determines the order and the distribution of data within 2227 * a slab object. 2228 */ 2229 static int calculate_sizes(struct kmem_cache *s, int forced_order) 2230 { 2231 unsigned long flags = s->flags; 2232 unsigned long size = s->objsize; 2233 unsigned long align = s->align; 2234 int order; 2235 2236 /* 2237 * Round up object size to the next word boundary. We can only 2238 * place the free pointer at word boundaries and this determines 2239 * the possible location of the free pointer. 2240 */ 2241 size = ALIGN(size, sizeof(void *)); 2242 2243 #ifdef CONFIG_SLUB_DEBUG 2244 /* 2245 * Determine if we can poison the object itself. 
If the user of 2246 * the slab may touch the object after free or before allocation 2247 * then we should never poison the object itself. 2248 */ 2249 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 2250 !s->ctor) 2251 s->flags |= __OBJECT_POISON; 2252 else 2253 s->flags &= ~__OBJECT_POISON; 2254 2255 2256 /* 2257 * If we are Redzoning then check if there is some space between the 2258 * end of the object and the free pointer. If not then add an 2259 * additional word to have some bytes to store Redzone information. 2260 */ 2261 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 2262 size += sizeof(void *); 2263 #endif 2264 2265 /* 2266 * With that we have determined the number of bytes in actual use 2267 * by the object. This is the potential offset to the free pointer. 2268 */ 2269 s->inuse = size; 2270 2271 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 2272 s->ctor)) { 2273 /* 2274 * Relocate free pointer after the object if it is not 2275 * permitted to overwrite the first word of the object on 2276 * kmem_cache_free. 2277 * 2278 * This is the case if we do RCU, have a constructor or 2279 * destructor or are poisoning the objects. 2280 */ 2281 s->offset = size; 2282 size += sizeof(void *); 2283 } 2284 2285 #ifdef CONFIG_SLUB_DEBUG 2286 if (flags & SLAB_STORE_USER) 2287 /* 2288 * Need to store information about allocs and frees after 2289 * the object. 2290 */ 2291 size += 2 * sizeof(struct track); 2292 2293 if (flags & SLAB_RED_ZONE) 2294 /* 2295 * Add some empty padding so that we can catch 2296 * overwrites from earlier objects rather than let 2297 * tracking information or the free pointer be 2298 * corrupted if a user writes before the start 2299 * of the object. 2300 */ 2301 size += sizeof(void *); 2302 #endif 2303 2304 /* 2305 * Determine the alignment based on various parameters that the 2306 * user specified and the dynamic determination of cache line size 2307 * on bootup. 2308 */ 2309 align = calculate_alignment(flags, align, s->objsize); 2310 2311 /* 2312 * SLUB stores one object immediately after another beginning from 2313 * offset 0. In order to align the objects we have to simply size 2314 * each object to conform to the alignment. 2315 */ 2316 size = ALIGN(size, align); 2317 s->size = size; 2318 if (forced_order >= 0) 2319 order = forced_order; 2320 else 2321 order = calculate_order(size); 2322 2323 if (order < 0) 2324 return 0; 2325 2326 s->allocflags = 0; 2327 if (order) 2328 s->allocflags |= __GFP_COMP; 2329 2330 if (s->flags & SLAB_CACHE_DMA) 2331 s->allocflags |= SLUB_DMA; 2332 2333 if (s->flags & SLAB_RECLAIM_ACCOUNT) 2334 s->allocflags |= __GFP_RECLAIMABLE; 2335 2336 /* 2337 * Determine the number of objects per slab 2338 */ 2339 s->oo = oo_make(order, size); 2340 s->min = oo_make(get_order(size), size); 2341 if (oo_objects(s->oo) > oo_objects(s->max)) 2342 s->max = s->oo; 2343 2344 return !!oo_objects(s->oo); 2345 2346 } 2347 2348 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 2349 const char *name, size_t size, 2350 size_t align, unsigned long flags, 2351 void (*ctor)(void *)) 2352 { 2353 memset(s, 0, kmem_size); 2354 s->name = name; 2355 s->ctor = ctor; 2356 s->objsize = size; 2357 s->align = align; 2358 s->flags = kmem_cache_flags(size, flags, name, ctor); 2359 2360 if (!calculate_sizes(s, -1)) 2361 goto error; 2362 2363 /* 2364 * The larger the object size is, the more pages we want on the partial 2365 * list to avoid pounding the page allocator excessively. 
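 *
 * For example, a cache with 192 byte objects gets ilog2(192) == 7
 * partial slabs as its minimum, while very small and very large
 * objects are clamped to MIN_PARTIAL and MAX_PARTIAL respectively
 * by set_min_partial() below.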
2366 */ 2367 set_min_partial(s, ilog2(s->size)); 2368 s->refcount = 1; 2369 #ifdef CONFIG_NUMA 2370 s->remote_node_defrag_ratio = 1000; 2371 #endif 2372 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) 2373 goto error; 2374 2375 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) 2376 return 1; 2377 free_kmem_cache_nodes(s); 2378 error: 2379 if (flags & SLAB_PANIC) 2380 panic("Cannot create slab %s size=%lu realsize=%u " 2381 "order=%u offset=%u flags=%lx\n", 2382 s->name, (unsigned long)size, s->size, oo_order(s->oo), 2383 s->offset, flags); 2384 return 0; 2385 } 2386 2387 /* 2388 * Check if a given pointer is valid 2389 */ 2390 int kmem_ptr_validate(struct kmem_cache *s, const void *object) 2391 { 2392 struct page *page; 2393 2394 page = get_object_page(object); 2395 2396 if (!page || s != page->slab) 2397 /* No slab or wrong slab */ 2398 return 0; 2399 2400 if (!check_valid_pointer(s, page, object)) 2401 return 0; 2402 2403 /* 2404 * We could also check if the object is on the slabs freelist. 2405 * But this would be too expensive and it seems that the main 2406 * purpose of kmem_ptr_valid() is to check if the object belongs 2407 * to a certain slab. 2408 */ 2409 return 1; 2410 } 2411 EXPORT_SYMBOL(kmem_ptr_validate); 2412 2413 /* 2414 * Determine the size of a slab object 2415 */ 2416 unsigned int kmem_cache_size(struct kmem_cache *s) 2417 { 2418 return s->objsize; 2419 } 2420 EXPORT_SYMBOL(kmem_cache_size); 2421 2422 const char *kmem_cache_name(struct kmem_cache *s) 2423 { 2424 return s->name; 2425 } 2426 EXPORT_SYMBOL(kmem_cache_name); 2427 2428 static void list_slab_objects(struct kmem_cache *s, struct page *page, 2429 const char *text) 2430 { 2431 #ifdef CONFIG_SLUB_DEBUG 2432 void *addr = page_address(page); 2433 void *p; 2434 DECLARE_BITMAP(map, page->objects); 2435 2436 bitmap_zero(map, page->objects); 2437 slab_err(s, page, "%s", text); 2438 slab_lock(page); 2439 for_each_free_object(p, s, page->freelist) 2440 set_bit(slab_index(p, s, addr), map); 2441 2442 for_each_object(p, s, addr, page->objects) { 2443 2444 if (!test_bit(slab_index(p, s, addr), map)) { 2445 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", 2446 p, p - addr); 2447 print_tracking(s, p); 2448 } 2449 } 2450 slab_unlock(page); 2451 #endif 2452 } 2453 2454 /* 2455 * Attempt to free all partial slabs on a node. 2456 */ 2457 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 2458 { 2459 unsigned long flags; 2460 struct page *page, *h; 2461 2462 spin_lock_irqsave(&n->list_lock, flags); 2463 list_for_each_entry_safe(page, h, &n->partial, lru) { 2464 if (!page->inuse) { 2465 list_del(&page->lru); 2466 discard_slab(s, page); 2467 n->nr_partial--; 2468 } else { 2469 list_slab_objects(s, page, 2470 "Objects remaining on kmem_cache_close()"); 2471 } 2472 } 2473 spin_unlock_irqrestore(&n->list_lock, flags); 2474 } 2475 2476 /* 2477 * Release all resources used by a slab cache. 
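 * The cpu slabs are flushed back, empty partial slabs are discarded
 * and the per node structures are freed; a nonzero return value means
 * that objects were still in use.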
2478 */ 2479 static inline int kmem_cache_close(struct kmem_cache *s) 2480 { 2481 int node; 2482 2483 flush_all(s); 2484 2485 /* Attempt to free all objects */ 2486 free_kmem_cache_cpus(s); 2487 for_each_node_state(node, N_NORMAL_MEMORY) { 2488 struct kmem_cache_node *n = get_node(s, node); 2489 2490 free_partial(s, n); 2491 if (n->nr_partial || slabs_node(s, node)) 2492 return 1; 2493 } 2494 free_kmem_cache_nodes(s); 2495 return 0; 2496 } 2497 2498 /* 2499 * Close a cache and release the kmem_cache structure 2500 * (must be used for caches created using kmem_cache_create) 2501 */ 2502 void kmem_cache_destroy(struct kmem_cache *s) 2503 { 2504 down_write(&slub_lock); 2505 s->refcount--; 2506 if (!s->refcount) { 2507 list_del(&s->list); 2508 up_write(&slub_lock); 2509 if (kmem_cache_close(s)) { 2510 printk(KERN_ERR "SLUB %s: %s called for cache that " 2511 "still has objects.\n", s->name, __func__); 2512 dump_stack(); 2513 } 2514 sysfs_slab_remove(s); 2515 } else 2516 up_write(&slub_lock); 2517 } 2518 EXPORT_SYMBOL(kmem_cache_destroy); 2519 2520 /******************************************************************** 2521 * Kmalloc subsystem 2522 *******************************************************************/ 2523 2524 struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned; 2525 EXPORT_SYMBOL(kmalloc_caches); 2526 2527 static int __init setup_slub_min_order(char *str) 2528 { 2529 get_option(&str, &slub_min_order); 2530 2531 return 1; 2532 } 2533 2534 __setup("slub_min_order=", setup_slub_min_order); 2535 2536 static int __init setup_slub_max_order(char *str) 2537 { 2538 get_option(&str, &slub_max_order); 2539 slub_max_order = min(slub_max_order, MAX_ORDER - 1); 2540 2541 return 1; 2542 } 2543 2544 __setup("slub_max_order=", setup_slub_max_order); 2545 2546 static int __init setup_slub_min_objects(char *str) 2547 { 2548 get_option(&str, &slub_min_objects); 2549 2550 return 1; 2551 } 2552 2553 __setup("slub_min_objects=", setup_slub_min_objects); 2554 2555 static int __init setup_slub_nomerge(char *str) 2556 { 2557 slub_nomerge = 1; 2558 return 1; 2559 } 2560 2561 __setup("slub_nomerge", setup_slub_nomerge); 2562 2563 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, 2564 const char *name, int size, gfp_t gfp_flags) 2565 { 2566 unsigned int flags = 0; 2567 2568 if (gfp_flags & SLUB_DMA) 2569 flags = SLAB_CACHE_DMA; 2570 2571 /* 2572 * This function is called with IRQs disabled during early-boot on 2573 * single CPU so there's no need to take slub_lock here. 
2574 */ 2575 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2576 flags, NULL)) 2577 goto panic; 2578 2579 list_add(&s->list, &slab_caches); 2580 2581 if (sysfs_slab_add(s)) 2582 goto panic; 2583 return s; 2584 2585 panic: 2586 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2587 } 2588 2589 #ifdef CONFIG_ZONE_DMA 2590 static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT]; 2591 2592 static void sysfs_add_func(struct work_struct *w) 2593 { 2594 struct kmem_cache *s; 2595 2596 down_write(&slub_lock); 2597 list_for_each_entry(s, &slab_caches, list) { 2598 if (s->flags & __SYSFS_ADD_DEFERRED) { 2599 s->flags &= ~__SYSFS_ADD_DEFERRED; 2600 sysfs_slab_add(s); 2601 } 2602 } 2603 up_write(&slub_lock); 2604 } 2605 2606 static DECLARE_WORK(sysfs_add_work, sysfs_add_func); 2607 2608 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) 2609 { 2610 struct kmem_cache *s; 2611 char *text; 2612 size_t realsize; 2613 2614 s = kmalloc_caches_dma[index]; 2615 if (s) 2616 return s; 2617 2618 /* Dynamically create dma cache */ 2619 if (flags & __GFP_WAIT) 2620 down_write(&slub_lock); 2621 else { 2622 if (!down_write_trylock(&slub_lock)) 2623 goto out; 2624 } 2625 2626 if (kmalloc_caches_dma[index]) 2627 goto unlock_out; 2628 2629 realsize = kmalloc_caches[index].objsize; 2630 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", 2631 (unsigned int)realsize); 2632 s = kmalloc(kmem_size, flags & ~SLUB_DMA); 2633 2634 if (!s || !text || !kmem_cache_open(s, flags, text, 2635 realsize, ARCH_KMALLOC_MINALIGN, 2636 SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) { 2637 kfree(s); 2638 kfree(text); 2639 goto unlock_out; 2640 } 2641 2642 list_add(&s->list, &slab_caches); 2643 kmalloc_caches_dma[index] = s; 2644 2645 schedule_work(&sysfs_add_work); 2646 2647 unlock_out: 2648 up_write(&slub_lock); 2649 out: 2650 return kmalloc_caches_dma[index]; 2651 } 2652 #endif 2653 2654 /* 2655 * Conversion table for small slabs sizes / 8 to the index in the 2656 * kmalloc array. This is necessary for slabs < 192 since we have non power 2657 * of two cache sizes there. The size of larger slabs can be determined using 2658 * fls. 
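 *
 * For example (with the standard kmalloc caches), a 100 byte request
 * has size_index[(100 - 1) / 8] == size_index[12] == 7 and is served
 * from the 128 byte cache, while a 300 byte request is above 192 and
 * uses fls(300 - 1) == 9, i.e. the 512 byte cache.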
2659 */ 2660 static s8 size_index[24] = { 2661 3, /* 8 */ 2662 4, /* 16 */ 2663 5, /* 24 */ 2664 5, /* 32 */ 2665 6, /* 40 */ 2666 6, /* 48 */ 2667 6, /* 56 */ 2668 6, /* 64 */ 2669 1, /* 72 */ 2670 1, /* 80 */ 2671 1, /* 88 */ 2672 1, /* 96 */ 2673 7, /* 104 */ 2674 7, /* 112 */ 2675 7, /* 120 */ 2676 7, /* 128 */ 2677 2, /* 136 */ 2678 2, /* 144 */ 2679 2, /* 152 */ 2680 2, /* 160 */ 2681 2, /* 168 */ 2682 2, /* 176 */ 2683 2, /* 184 */ 2684 2 /* 192 */ 2685 }; 2686 2687 static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2688 { 2689 int index; 2690 2691 if (size <= 192) { 2692 if (!size) 2693 return ZERO_SIZE_PTR; 2694 2695 index = size_index[(size - 1) / 8]; 2696 } else 2697 index = fls(size - 1); 2698 2699 #ifdef CONFIG_ZONE_DMA 2700 if (unlikely((flags & SLUB_DMA))) 2701 return dma_kmalloc_cache(index, flags); 2702 2703 #endif 2704 return &kmalloc_caches[index]; 2705 } 2706 2707 void *__kmalloc(size_t size, gfp_t flags) 2708 { 2709 struct kmem_cache *s; 2710 void *ret; 2711 2712 if (unlikely(size > SLUB_MAX_SIZE)) 2713 return kmalloc_large(size, flags); 2714 2715 s = get_slab(size, flags); 2716 2717 if (unlikely(ZERO_OR_NULL_PTR(s))) 2718 return s; 2719 2720 ret = slab_alloc(s, flags, -1, _RET_IP_); 2721 2722 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 2723 2724 return ret; 2725 } 2726 EXPORT_SYMBOL(__kmalloc); 2727 2728 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2729 { 2730 struct page *page = alloc_pages_node(node, flags | __GFP_COMP, 2731 get_order(size)); 2732 2733 if (page) 2734 return page_address(page); 2735 else 2736 return NULL; 2737 } 2738 2739 #ifdef CONFIG_NUMA 2740 void *__kmalloc_node(size_t size, gfp_t flags, int node) 2741 { 2742 struct kmem_cache *s; 2743 void *ret; 2744 2745 if (unlikely(size > SLUB_MAX_SIZE)) { 2746 ret = kmalloc_large_node(size, flags, node); 2747 2748 trace_kmalloc_node(_RET_IP_, ret, 2749 size, PAGE_SIZE << get_order(size), 2750 flags, node); 2751 2752 return ret; 2753 } 2754 2755 s = get_slab(size, flags); 2756 2757 if (unlikely(ZERO_OR_NULL_PTR(s))) 2758 return s; 2759 2760 ret = slab_alloc(s, flags, node, _RET_IP_); 2761 2762 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); 2763 2764 return ret; 2765 } 2766 EXPORT_SYMBOL(__kmalloc_node); 2767 #endif 2768 2769 size_t ksize(const void *object) 2770 { 2771 struct page *page; 2772 struct kmem_cache *s; 2773 2774 if (unlikely(object == ZERO_SIZE_PTR)) 2775 return 0; 2776 2777 page = virt_to_head_page(object); 2778 2779 if (unlikely(!PageSlab(page))) { 2780 WARN_ON(!PageCompound(page)); 2781 return PAGE_SIZE << compound_order(page); 2782 } 2783 s = page->slab; 2784 2785 #ifdef CONFIG_SLUB_DEBUG 2786 /* 2787 * Debugging requires use of the padding between object 2788 * and whatever may come after it. 2789 */ 2790 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 2791 return s->objsize; 2792 2793 #endif 2794 /* 2795 * If we have the need to store the freelist pointer 2796 * back there or track user information then we can 2797 * only use the space before that information. 
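 *
 * For example, ksize() of a 42 byte kmalloc() allocation normally
 * reports 64 bytes, the whole kmalloc-64 object that was actually
 * handed out; the checks below only make sure that space needed for
 * debug metadata or a preserved free pointer is never reported as
 * usable.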
2798 */ 2799 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 2800 return s->inuse; 2801 /* 2802 * Else we can use all the padding etc for the allocation 2803 */ 2804 return s->size; 2805 } 2806 EXPORT_SYMBOL(ksize); 2807 2808 void kfree(const void *x) 2809 { 2810 struct page *page; 2811 void *object = (void *)x; 2812 2813 trace_kfree(_RET_IP_, x); 2814 2815 if (unlikely(ZERO_OR_NULL_PTR(x))) 2816 return; 2817 2818 page = virt_to_head_page(x); 2819 if (unlikely(!PageSlab(page))) { 2820 BUG_ON(!PageCompound(page)); 2821 put_page(page); 2822 return; 2823 } 2824 slab_free(page->slab, page, object, _RET_IP_); 2825 } 2826 EXPORT_SYMBOL(kfree); 2827 2828 /* 2829 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2830 * the remaining slabs by the number of items in use. The slabs with the 2831 * most items in use come first. New allocations will then fill those up 2832 * and thus they can be removed from the partial lists. 2833 * 2834 * The slabs with the least items are placed last. This results in them 2835 * being allocated from last increasing the chance that the last objects 2836 * are freed in them. 2837 */ 2838 int kmem_cache_shrink(struct kmem_cache *s) 2839 { 2840 int node; 2841 int i; 2842 struct kmem_cache_node *n; 2843 struct page *page; 2844 struct page *t; 2845 int objects = oo_objects(s->max); 2846 struct list_head *slabs_by_inuse = 2847 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); 2848 unsigned long flags; 2849 2850 if (!slabs_by_inuse) 2851 return -ENOMEM; 2852 2853 flush_all(s); 2854 for_each_node_state(node, N_NORMAL_MEMORY) { 2855 n = get_node(s, node); 2856 2857 if (!n->nr_partial) 2858 continue; 2859 2860 for (i = 0; i < objects; i++) 2861 INIT_LIST_HEAD(slabs_by_inuse + i); 2862 2863 spin_lock_irqsave(&n->list_lock, flags); 2864 2865 /* 2866 * Build lists indexed by the items in use in each slab. 2867 * 2868 * Note that concurrent frees may occur while we hold the 2869 * list_lock. page->inuse here is the upper limit. 2870 */ 2871 list_for_each_entry_safe(page, t, &n->partial, lru) { 2872 if (!page->inuse && slab_trylock(page)) { 2873 /* 2874 * Must hold slab lock here because slab_free 2875 * may have freed the last object and be 2876 * waiting to release the slab. 2877 */ 2878 list_del(&page->lru); 2879 n->nr_partial--; 2880 slab_unlock(page); 2881 discard_slab(s, page); 2882 } else { 2883 list_move(&page->lru, 2884 slabs_by_inuse + page->inuse); 2885 } 2886 } 2887 2888 /* 2889 * Rebuild the partial list with the slabs filled up most 2890 * first and the least used slabs at the end. 2891 */ 2892 for (i = objects - 1; i >= 0; i--) 2893 list_splice(slabs_by_inuse + i, n->partial.prev); 2894 2895 spin_unlock_irqrestore(&n->list_lock, flags); 2896 } 2897 2898 kfree(slabs_by_inuse); 2899 return 0; 2900 } 2901 EXPORT_SYMBOL(kmem_cache_shrink); 2902 2903 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 2904 static int slab_mem_going_offline_callback(void *arg) 2905 { 2906 struct kmem_cache *s; 2907 2908 down_read(&slub_lock); 2909 list_for_each_entry(s, &slab_caches, list) 2910 kmem_cache_shrink(s); 2911 up_read(&slub_lock); 2912 2913 return 0; 2914 } 2915 2916 static void slab_mem_offline_callback(void *arg) 2917 { 2918 struct kmem_cache_node *n; 2919 struct kmem_cache *s; 2920 struct memory_notify *marg = arg; 2921 int offline_node; 2922 2923 offline_node = marg->status_change_nid; 2924 2925 /* 2926 * If the node still has available memory. we need kmem_cache_node 2927 * for it yet. 
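 * (A negative status_change_nid means the node keeps some of its
 * memory, so its kmem_cache_node structures must stay around and
 * there is nothing to tear down here.)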
2928 */ 2929 if (offline_node < 0) 2930 return; 2931 2932 down_read(&slub_lock); 2933 list_for_each_entry(s, &slab_caches, list) { 2934 n = get_node(s, offline_node); 2935 if (n) { 2936 /* 2937 * if n->nr_slabs > 0, slabs still exist on the node 2938 * that is going down. We were unable to free them, 2939 * and offline_pages() function shoudn't call this 2940 * callback. So, we must fail. 2941 */ 2942 BUG_ON(slabs_node(s, offline_node)); 2943 2944 s->node[offline_node] = NULL; 2945 kmem_cache_free(kmalloc_caches, n); 2946 } 2947 } 2948 up_read(&slub_lock); 2949 } 2950 2951 static int slab_mem_going_online_callback(void *arg) 2952 { 2953 struct kmem_cache_node *n; 2954 struct kmem_cache *s; 2955 struct memory_notify *marg = arg; 2956 int nid = marg->status_change_nid; 2957 int ret = 0; 2958 2959 /* 2960 * If the node's memory is already available, then kmem_cache_node is 2961 * already created. Nothing to do. 2962 */ 2963 if (nid < 0) 2964 return 0; 2965 2966 /* 2967 * We are bringing a node online. No memory is available yet. We must 2968 * allocate a kmem_cache_node structure in order to bring the node 2969 * online. 2970 */ 2971 down_read(&slub_lock); 2972 list_for_each_entry(s, &slab_caches, list) { 2973 /* 2974 * XXX: kmem_cache_alloc_node will fallback to other nodes 2975 * since memory is not yet available from the node that 2976 * is brought up. 2977 */ 2978 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL); 2979 if (!n) { 2980 ret = -ENOMEM; 2981 goto out; 2982 } 2983 init_kmem_cache_node(n, s); 2984 s->node[nid] = n; 2985 } 2986 out: 2987 up_read(&slub_lock); 2988 return ret; 2989 } 2990 2991 static int slab_memory_callback(struct notifier_block *self, 2992 unsigned long action, void *arg) 2993 { 2994 int ret = 0; 2995 2996 switch (action) { 2997 case MEM_GOING_ONLINE: 2998 ret = slab_mem_going_online_callback(arg); 2999 break; 3000 case MEM_GOING_OFFLINE: 3001 ret = slab_mem_going_offline_callback(arg); 3002 break; 3003 case MEM_OFFLINE: 3004 case MEM_CANCEL_ONLINE: 3005 slab_mem_offline_callback(arg); 3006 break; 3007 case MEM_ONLINE: 3008 case MEM_CANCEL_OFFLINE: 3009 break; 3010 } 3011 if (ret) 3012 ret = notifier_from_errno(ret); 3013 else 3014 ret = NOTIFY_OK; 3015 return ret; 3016 } 3017 3018 #endif /* CONFIG_MEMORY_HOTPLUG */ 3019 3020 /******************************************************************** 3021 * Basic setup of slabs 3022 *******************************************************************/ 3023 3024 void __init kmem_cache_init(void) 3025 { 3026 int i; 3027 int caches = 0; 3028 3029 init_alloc_cpu(); 3030 3031 #ifdef CONFIG_NUMA 3032 /* 3033 * Must first have the slab cache available for the allocations of the 3034 * struct kmem_cache_node's. There is special bootstrap code in 3035 * kmem_cache_open for slab_state == DOWN. 
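 * (See early_kmem_cache_node_alloc() above, which hands out the first
 * object of a freshly allocated slab by hand while slab_state is
 * still DOWN.)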
3036 */ 3037 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", 3038 sizeof(struct kmem_cache_node), GFP_NOWAIT); 3039 kmalloc_caches[0].refcount = -1; 3040 caches++; 3041 3042 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 3043 #endif 3044 3045 /* Able to allocate the per node structures */ 3046 slab_state = PARTIAL; 3047 3048 /* Caches that are not of the two-to-the-power-of size */ 3049 if (KMALLOC_MIN_SIZE <= 64) { 3050 create_kmalloc_cache(&kmalloc_caches[1], 3051 "kmalloc-96", 96, GFP_NOWAIT); 3052 caches++; 3053 create_kmalloc_cache(&kmalloc_caches[2], 3054 "kmalloc-192", 192, GFP_NOWAIT); 3055 caches++; 3056 } 3057 3058 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3059 create_kmalloc_cache(&kmalloc_caches[i], 3060 "kmalloc", 1 << i, GFP_NOWAIT); 3061 caches++; 3062 } 3063 3064 3065 /* 3066 * Patch up the size_index table if we have strange large alignment 3067 * requirements for the kmalloc array. This is only the case for 3068 * MIPS it seems. The standard arches will not generate any code here. 3069 * 3070 * Largest permitted alignment is 256 bytes due to the way we 3071 * handle the index determination for the smaller caches. 3072 * 3073 * Make sure that nothing crazy happens if someone starts tinkering 3074 * around with ARCH_KMALLOC_MINALIGN 3075 */ 3076 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || 3077 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); 3078 3079 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) 3080 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW; 3081 3082 if (KMALLOC_MIN_SIZE == 128) { 3083 /* 3084 * The 192 byte sized cache is not used if the alignment 3085 * is 128 byte. Redirect kmalloc to use the 256 byte cache 3086 * instead. 3087 */ 3088 for (i = 128 + 8; i <= 192; i += 8) 3089 size_index[(i - 1) / 8] = 8; 3090 } 3091 3092 slab_state = UP; 3093 3094 /* Provide the correct kmalloc names now that the caches are up */ 3095 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) 3096 kmalloc_caches[i]. name = 3097 kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); 3098 3099 #ifdef CONFIG_SMP 3100 register_cpu_notifier(&slab_notifier); 3101 kmem_size = offsetof(struct kmem_cache, cpu_slab) + 3102 nr_cpu_ids * sizeof(struct kmem_cache_cpu *); 3103 #else 3104 kmem_size = sizeof(struct kmem_cache); 3105 #endif 3106 3107 printk(KERN_INFO 3108 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3109 " CPUs=%d, Nodes=%d\n", 3110 caches, cache_line_size(), 3111 slub_min_order, slub_max_order, slub_min_objects, 3112 nr_cpu_ids, nr_node_ids); 3113 } 3114 3115 void __init kmem_cache_init_late(void) 3116 { 3117 /* 3118 * Interrupts are enabled now so all GFP allocations are safe. 3119 */ 3120 slab_gfp_mask = __GFP_BITS_MASK; 3121 } 3122 3123 /* 3124 * Find a mergeable slab cache 3125 */ 3126 static int slab_unmergeable(struct kmem_cache *s) 3127 { 3128 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3129 return 1; 3130 3131 if (s->ctor) 3132 return 1; 3133 3134 /* 3135 * We may have set a slab to be unmergeable during bootstrap. 
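 * (kmem_cache_init() does this for the kmem_cache_node cache by
 * setting its refcount to -1, so that cache is never merged.)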
3136 */ 3137 if (s->refcount < 0) 3138 return 1; 3139 3140 return 0; 3141 } 3142 3143 static struct kmem_cache *find_mergeable(size_t size, 3144 size_t align, unsigned long flags, const char *name, 3145 void (*ctor)(void *)) 3146 { 3147 struct kmem_cache *s; 3148 3149 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 3150 return NULL; 3151 3152 if (ctor) 3153 return NULL; 3154 3155 size = ALIGN(size, sizeof(void *)); 3156 align = calculate_alignment(flags, align, size); 3157 size = ALIGN(size, align); 3158 flags = kmem_cache_flags(size, flags, name, NULL); 3159 3160 list_for_each_entry(s, &slab_caches, list) { 3161 if (slab_unmergeable(s)) 3162 continue; 3163 3164 if (size > s->size) 3165 continue; 3166 3167 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3168 continue; 3169 /* 3170 * Check if alignment is compatible. 3171 * Courtesy of Adrian Drzewiecki 3172 */ 3173 if ((s->size & ~(align - 1)) != s->size) 3174 continue; 3175 3176 if (s->size - size >= sizeof(void *)) 3177 continue; 3178 3179 return s; 3180 } 3181 return NULL; 3182 } 3183 3184 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 3185 size_t align, unsigned long flags, void (*ctor)(void *)) 3186 { 3187 struct kmem_cache *s; 3188 3189 down_write(&slub_lock); 3190 s = find_mergeable(size, align, flags, name, ctor); 3191 if (s) { 3192 int cpu; 3193 3194 s->refcount++; 3195 /* 3196 * Adjust the object sizes so that we clear 3197 * the complete object on kzalloc. 3198 */ 3199 s->objsize = max(s->objsize, (int)size); 3200 3201 /* 3202 * And then we need to update the object size in the 3203 * per cpu structures 3204 */ 3205 for_each_online_cpu(cpu) 3206 get_cpu_slab(s, cpu)->objsize = s->objsize; 3207 3208 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 3209 up_write(&slub_lock); 3210 3211 if (sysfs_slab_alias(s, name)) { 3212 down_write(&slub_lock); 3213 s->refcount--; 3214 up_write(&slub_lock); 3215 goto err; 3216 } 3217 return s; 3218 } 3219 3220 s = kmalloc(kmem_size, GFP_KERNEL); 3221 if (s) { 3222 if (kmem_cache_open(s, GFP_KERNEL, name, 3223 size, align, flags, ctor)) { 3224 list_add(&s->list, &slab_caches); 3225 up_write(&slub_lock); 3226 if (sysfs_slab_add(s)) { 3227 down_write(&slub_lock); 3228 list_del(&s->list); 3229 up_write(&slub_lock); 3230 kfree(s); 3231 goto err; 3232 } 3233 return s; 3234 } 3235 kfree(s); 3236 } 3237 up_write(&slub_lock); 3238 3239 err: 3240 if (flags & SLAB_PANIC) 3241 panic("Cannot create slabcache %s\n", name); 3242 else 3243 s = NULL; 3244 return s; 3245 } 3246 EXPORT_SYMBOL(kmem_cache_create); 3247 3248 #ifdef CONFIG_SMP 3249 /* 3250 * Use the cpu notifier to insure that the cpu slabs are flushed when 3251 * necessary. 
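 * CPU_UP_PREPARE allocates the kmem_cache_cpu structures for the
 * incoming cpu; CPU_DEAD and the cancel cases flush the cpu slab and
 * free those structures again.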
3252 */
3253 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3254 unsigned long action, void *hcpu)
3255 {
3256 long cpu = (long)hcpu;
3257 struct kmem_cache *s;
3258 unsigned long flags;
3259
3260 switch (action) {
3261 case CPU_UP_PREPARE:
3262 case CPU_UP_PREPARE_FROZEN:
3263 init_alloc_cpu_cpu(cpu);
3264 down_read(&slub_lock);
3265 list_for_each_entry(s, &slab_caches, list)
3266 s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
3267 GFP_KERNEL);
3268 up_read(&slub_lock);
3269 break;
3270
3271 case CPU_UP_CANCELED:
3272 case CPU_UP_CANCELED_FROZEN:
3273 case CPU_DEAD:
3274 case CPU_DEAD_FROZEN:
3275 down_read(&slub_lock);
3276 list_for_each_entry(s, &slab_caches, list) {
3277 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3278
3279 local_irq_save(flags);
3280 __flush_cpu_slab(s, cpu);
3281 local_irq_restore(flags);
3282 free_kmem_cache_cpu(c, cpu);
3283 s->cpu_slab[cpu] = NULL;
3284 }
3285 up_read(&slub_lock);
3286 break;
3287 default:
3288 break;
3289 }
3290 return NOTIFY_OK;
3291 }
3292
3293 static struct notifier_block __cpuinitdata slab_notifier = {
3294 .notifier_call = slab_cpuup_callback
3295 };
3296
3297 #endif
3298
3299 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3300 {
3301 struct kmem_cache *s;
3302 void *ret;
3303
3304 if (unlikely(size > SLUB_MAX_SIZE))
3305 return kmalloc_large(size, gfpflags);
3306
3307 s = get_slab(size, gfpflags);
3308
3309 if (unlikely(ZERO_OR_NULL_PTR(s)))
3310 return s;
3311
3312 ret = slab_alloc(s, gfpflags, -1, caller);
3313
3314 /* Honor the call site pointer we received. */
3315 trace_kmalloc(caller, ret, size, s->size, gfpflags);
3316
3317 return ret;
3318 }
3319
3320 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3321 int node, unsigned long caller)
3322 {
3323 struct kmem_cache *s;
3324 void *ret;
3325
3326 if (unlikely(size > SLUB_MAX_SIZE))
3327 return kmalloc_large_node(size, gfpflags, node);
3328
3329 s = get_slab(size, gfpflags);
3330
3331 if (unlikely(ZERO_OR_NULL_PTR(s)))
3332 return s;
3333
3334 ret = slab_alloc(s, gfpflags, node, caller);
3335
3336 /* Honor the call site pointer we received.
*/ 3337 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 3338 3339 return ret; 3340 } 3341 3342 #ifdef CONFIG_SLUB_DEBUG 3343 static unsigned long count_partial(struct kmem_cache_node *n, 3344 int (*get_count)(struct page *)) 3345 { 3346 unsigned long flags; 3347 unsigned long x = 0; 3348 struct page *page; 3349 3350 spin_lock_irqsave(&n->list_lock, flags); 3351 list_for_each_entry(page, &n->partial, lru) 3352 x += get_count(page); 3353 spin_unlock_irqrestore(&n->list_lock, flags); 3354 return x; 3355 } 3356 3357 static int count_inuse(struct page *page) 3358 { 3359 return page->inuse; 3360 } 3361 3362 static int count_total(struct page *page) 3363 { 3364 return page->objects; 3365 } 3366 3367 static int count_free(struct page *page) 3368 { 3369 return page->objects - page->inuse; 3370 } 3371 3372 static int validate_slab(struct kmem_cache *s, struct page *page, 3373 unsigned long *map) 3374 { 3375 void *p; 3376 void *addr = page_address(page); 3377 3378 if (!check_slab(s, page) || 3379 !on_freelist(s, page, NULL)) 3380 return 0; 3381 3382 /* Now we know that a valid freelist exists */ 3383 bitmap_zero(map, page->objects); 3384 3385 for_each_free_object(p, s, page->freelist) { 3386 set_bit(slab_index(p, s, addr), map); 3387 if (!check_object(s, page, p, 0)) 3388 return 0; 3389 } 3390 3391 for_each_object(p, s, addr, page->objects) 3392 if (!test_bit(slab_index(p, s, addr), map)) 3393 if (!check_object(s, page, p, 1)) 3394 return 0; 3395 return 1; 3396 } 3397 3398 static void validate_slab_slab(struct kmem_cache *s, struct page *page, 3399 unsigned long *map) 3400 { 3401 if (slab_trylock(page)) { 3402 validate_slab(s, page, map); 3403 slab_unlock(page); 3404 } else 3405 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", 3406 s->name, page); 3407 3408 if (s->flags & DEBUG_DEFAULT_FLAGS) { 3409 if (!PageSlubDebug(page)) 3410 printk(KERN_ERR "SLUB %s: SlubDebug not set " 3411 "on slab 0x%p\n", s->name, page); 3412 } else { 3413 if (PageSlubDebug(page)) 3414 printk(KERN_ERR "SLUB %s: SlubDebug set on " 3415 "slab 0x%p\n", s->name, page); 3416 } 3417 } 3418 3419 static int validate_slab_node(struct kmem_cache *s, 3420 struct kmem_cache_node *n, unsigned long *map) 3421 { 3422 unsigned long count = 0; 3423 struct page *page; 3424 unsigned long flags; 3425 3426 spin_lock_irqsave(&n->list_lock, flags); 3427 3428 list_for_each_entry(page, &n->partial, lru) { 3429 validate_slab_slab(s, page, map); 3430 count++; 3431 } 3432 if (count != n->nr_partial) 3433 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 3434 "counter=%ld\n", s->name, count, n->nr_partial); 3435 3436 if (!(s->flags & SLAB_STORE_USER)) 3437 goto out; 3438 3439 list_for_each_entry(page, &n->full, lru) { 3440 validate_slab_slab(s, page, map); 3441 count++; 3442 } 3443 if (count != atomic_long_read(&n->nr_slabs)) 3444 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 3445 "counter=%ld\n", s->name, count, 3446 atomic_long_read(&n->nr_slabs)); 3447 3448 out: 3449 spin_unlock_irqrestore(&n->list_lock, flags); 3450 return count; 3451 } 3452 3453 static long validate_slab_cache(struct kmem_cache *s) 3454 { 3455 int node; 3456 unsigned long count = 0; 3457 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3458 sizeof(unsigned long), GFP_KERNEL); 3459 3460 if (!map) 3461 return -ENOMEM; 3462 3463 flush_all(s); 3464 for_each_node_state(node, N_NORMAL_MEMORY) { 3465 struct kmem_cache_node *n = get_node(s, node); 3466 3467 count += validate_slab_node(s, n, map); 3468 } 3469 kfree(map); 3470 return count; 
3471 } 3472 3473 #ifdef SLUB_RESILIENCY_TEST 3474 static void resiliency_test(void) 3475 { 3476 u8 *p; 3477 3478 printk(KERN_ERR "SLUB resiliency testing\n"); 3479 printk(KERN_ERR "-----------------------\n"); 3480 printk(KERN_ERR "A. Corruption after allocation\n"); 3481 3482 p = kzalloc(16, GFP_KERNEL); 3483 p[16] = 0x12; 3484 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" 3485 " 0x12->0x%p\n\n", p + 16); 3486 3487 validate_slab_cache(kmalloc_caches + 4); 3488 3489 /* Hmmm... The next two are dangerous */ 3490 p = kzalloc(32, GFP_KERNEL); 3491 p[32 + sizeof(void *)] = 0x34; 3492 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" 3493 " 0x34 -> -0x%p\n", p); 3494 printk(KERN_ERR 3495 "If allocated object is overwritten then not detectable\n\n"); 3496 3497 validate_slab_cache(kmalloc_caches + 5); 3498 p = kzalloc(64, GFP_KERNEL); 3499 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 3500 *p = 0x56; 3501 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 3502 p); 3503 printk(KERN_ERR 3504 "If allocated object is overwritten then not detectable\n\n"); 3505 validate_slab_cache(kmalloc_caches + 6); 3506 3507 printk(KERN_ERR "\nB. Corruption after free\n"); 3508 p = kzalloc(128, GFP_KERNEL); 3509 kfree(p); 3510 *p = 0x78; 3511 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 3512 validate_slab_cache(kmalloc_caches + 7); 3513 3514 p = kzalloc(256, GFP_KERNEL); 3515 kfree(p); 3516 p[50] = 0x9a; 3517 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", 3518 p); 3519 validate_slab_cache(kmalloc_caches + 8); 3520 3521 p = kzalloc(512, GFP_KERNEL); 3522 kfree(p); 3523 p[512] = 0xab; 3524 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 3525 validate_slab_cache(kmalloc_caches + 9); 3526 } 3527 #else 3528 static void resiliency_test(void) {}; 3529 #endif 3530 3531 /* 3532 * Generate lists of code addresses where slabcache objects are allocated 3533 * and freed. 3534 */ 3535 3536 struct location { 3537 unsigned long count; 3538 unsigned long addr; 3539 long long sum_time; 3540 long min_time; 3541 long max_time; 3542 long min_pid; 3543 long max_pid; 3544 DECLARE_BITMAP(cpus, NR_CPUS); 3545 nodemask_t nodes; 3546 }; 3547 3548 struct loc_track { 3549 unsigned long max; 3550 unsigned long count; 3551 struct location *loc; 3552 }; 3553 3554 static void free_loc_track(struct loc_track *t) 3555 { 3556 if (t->max) 3557 free_pages((unsigned long)t->loc, 3558 get_order(sizeof(struct location) * t->max)); 3559 } 3560 3561 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 3562 { 3563 struct location *l; 3564 int order; 3565 3566 order = get_order(sizeof(struct location) * max); 3567 3568 l = (void *)__get_free_pages(flags, order); 3569 if (!l) 3570 return 0; 3571 3572 if (t->count) { 3573 memcpy(l, t->loc, sizeof(struct location) * t->count); 3574 free_loc_track(t); 3575 } 3576 t->max = max; 3577 t->loc = l; 3578 return 1; 3579 } 3580 3581 static int add_location(struct loc_track *t, struct kmem_cache *s, 3582 const struct track *track) 3583 { 3584 long start, end, pos; 3585 struct location *l; 3586 unsigned long caddr; 3587 unsigned long age = jiffies - track->when; 3588 3589 start = -1; 3590 end = t->count; 3591 3592 for ( ; ; ) { 3593 pos = start + (end - start + 1) / 2; 3594 3595 /* 3596 * There is nothing at "end". If we end up there 3597 * we need to add something to before end. 
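 *
 * The loop is a binary search over the address-sorted t->loc[] array,
 * maintaining the invariant
 *
 *	t->loc[start].addr < track->addr < t->loc[end].addr
 *
 * with start == -1 and end == t->count acting as sentinels. Either we
 * hit an entry with the same address and merge into it, or pos
 * converges on end and a new entry is inserted at that position.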
3598 */ 3599 if (pos == end) 3600 break; 3601 3602 caddr = t->loc[pos].addr; 3603 if (track->addr == caddr) { 3604 3605 l = &t->loc[pos]; 3606 l->count++; 3607 if (track->when) { 3608 l->sum_time += age; 3609 if (age < l->min_time) 3610 l->min_time = age; 3611 if (age > l->max_time) 3612 l->max_time = age; 3613 3614 if (track->pid < l->min_pid) 3615 l->min_pid = track->pid; 3616 if (track->pid > l->max_pid) 3617 l->max_pid = track->pid; 3618 3619 cpumask_set_cpu(track->cpu, 3620 to_cpumask(l->cpus)); 3621 } 3622 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3623 return 1; 3624 } 3625 3626 if (track->addr < caddr) 3627 end = pos; 3628 else 3629 start = pos; 3630 } 3631 3632 /* 3633 * Not found. Insert new tracking element. 3634 */ 3635 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 3636 return 0; 3637 3638 l = t->loc + pos; 3639 if (pos < t->count) 3640 memmove(l + 1, l, 3641 (t->count - pos) * sizeof(struct location)); 3642 t->count++; 3643 l->count = 1; 3644 l->addr = track->addr; 3645 l->sum_time = age; 3646 l->min_time = age; 3647 l->max_time = age; 3648 l->min_pid = track->pid; 3649 l->max_pid = track->pid; 3650 cpumask_clear(to_cpumask(l->cpus)); 3651 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 3652 nodes_clear(l->nodes); 3653 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3654 return 1; 3655 } 3656 3657 static void process_slab(struct loc_track *t, struct kmem_cache *s, 3658 struct page *page, enum track_item alloc) 3659 { 3660 void *addr = page_address(page); 3661 DECLARE_BITMAP(map, page->objects); 3662 void *p; 3663 3664 bitmap_zero(map, page->objects); 3665 for_each_free_object(p, s, page->freelist) 3666 set_bit(slab_index(p, s, addr), map); 3667 3668 for_each_object(p, s, addr, page->objects) 3669 if (!test_bit(slab_index(p, s, addr), map)) 3670 add_location(t, s, get_track(s, p, alloc)); 3671 } 3672 3673 static int list_locations(struct kmem_cache *s, char *buf, 3674 enum track_item alloc) 3675 { 3676 int len = 0; 3677 unsigned long i; 3678 struct loc_track t = { 0, 0, NULL }; 3679 int node; 3680 3681 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 3682 GFP_TEMPORARY)) 3683 return sprintf(buf, "Out of memory\n"); 3684 3685 /* Push back cpu slabs */ 3686 flush_all(s); 3687 3688 for_each_node_state(node, N_NORMAL_MEMORY) { 3689 struct kmem_cache_node *n = get_node(s, node); 3690 unsigned long flags; 3691 struct page *page; 3692 3693 if (!atomic_long_read(&n->nr_slabs)) 3694 continue; 3695 3696 spin_lock_irqsave(&n->list_lock, flags); 3697 list_for_each_entry(page, &n->partial, lru) 3698 process_slab(&t, s, page, alloc); 3699 list_for_each_entry(page, &n->full, lru) 3700 process_slab(&t, s, page, alloc); 3701 spin_unlock_irqrestore(&n->list_lock, flags); 3702 } 3703 3704 for (i = 0; i < t.count; i++) { 3705 struct location *l = &t.loc[i]; 3706 3707 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) 3708 break; 3709 len += sprintf(buf + len, "%7ld ", l->count); 3710 3711 if (l->addr) 3712 len += sprint_symbol(buf + len, (unsigned long)l->addr); 3713 else 3714 len += sprintf(buf + len, "<not-available>"); 3715 3716 if (l->sum_time != l->min_time) { 3717 len += sprintf(buf + len, " age=%ld/%ld/%ld", 3718 l->min_time, 3719 (long)div_u64(l->sum_time, l->count), 3720 l->max_time); 3721 } else 3722 len += sprintf(buf + len, " age=%ld", 3723 l->min_time); 3724 3725 if (l->min_pid != l->max_pid) 3726 len += sprintf(buf + len, " pid=%ld-%ld", 3727 l->min_pid, l->max_pid); 3728 else 3729 len += sprintf(buf + len, " pid=%ld", 3730 
l->min_pid); 3731 3732 if (num_online_cpus() > 1 && 3733 !cpumask_empty(to_cpumask(l->cpus)) && 3734 len < PAGE_SIZE - 60) { 3735 len += sprintf(buf + len, " cpus="); 3736 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3737 to_cpumask(l->cpus)); 3738 } 3739 3740 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && 3741 len < PAGE_SIZE - 60) { 3742 len += sprintf(buf + len, " nodes="); 3743 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3744 l->nodes); 3745 } 3746 3747 len += sprintf(buf + len, "\n"); 3748 } 3749 3750 free_loc_track(&t); 3751 if (!t.count) 3752 len += sprintf(buf, "No data\n"); 3753 return len; 3754 } 3755 3756 enum slab_stat_type { 3757 SL_ALL, /* All slabs */ 3758 SL_PARTIAL, /* Only partially allocated slabs */ 3759 SL_CPU, /* Only slabs used for cpu caches */ 3760 SL_OBJECTS, /* Determine allocated objects not slabs */ 3761 SL_TOTAL /* Determine object capacity not slabs */ 3762 }; 3763 3764 #define SO_ALL (1 << SL_ALL) 3765 #define SO_PARTIAL (1 << SL_PARTIAL) 3766 #define SO_CPU (1 << SL_CPU) 3767 #define SO_OBJECTS (1 << SL_OBJECTS) 3768 #define SO_TOTAL (1 << SL_TOTAL) 3769 3770 static ssize_t show_slab_objects(struct kmem_cache *s, 3771 char *buf, unsigned long flags) 3772 { 3773 unsigned long total = 0; 3774 int node; 3775 int x; 3776 unsigned long *nodes; 3777 unsigned long *per_cpu; 3778 3779 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 3780 if (!nodes) 3781 return -ENOMEM; 3782 per_cpu = nodes + nr_node_ids; 3783 3784 if (flags & SO_CPU) { 3785 int cpu; 3786 3787 for_each_possible_cpu(cpu) { 3788 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 3789 3790 if (!c || c->node < 0) 3791 continue; 3792 3793 if (c->page) { 3794 if (flags & SO_TOTAL) 3795 x = c->page->objects; 3796 else if (flags & SO_OBJECTS) 3797 x = c->page->inuse; 3798 else 3799 x = 1; 3800 3801 total += x; 3802 nodes[c->node] += x; 3803 } 3804 per_cpu[c->node]++; 3805 } 3806 } 3807 3808 if (flags & SO_ALL) { 3809 for_each_node_state(node, N_NORMAL_MEMORY) { 3810 struct kmem_cache_node *n = get_node(s, node); 3811 3812 if (flags & SO_TOTAL) 3813 x = atomic_long_read(&n->total_objects); 3814 else if (flags & SO_OBJECTS) 3815 x = atomic_long_read(&n->total_objects) - 3816 count_partial(n, count_free); 3817 3818 else 3819 x = atomic_long_read(&n->nr_slabs); 3820 total += x; 3821 nodes[node] += x; 3822 } 3823 3824 } else if (flags & SO_PARTIAL) { 3825 for_each_node_state(node, N_NORMAL_MEMORY) { 3826 struct kmem_cache_node *n = get_node(s, node); 3827 3828 if (flags & SO_TOTAL) 3829 x = count_partial(n, count_total); 3830 else if (flags & SO_OBJECTS) 3831 x = count_partial(n, count_inuse); 3832 else 3833 x = n->nr_partial; 3834 total += x; 3835 nodes[node] += x; 3836 } 3837 } 3838 x = sprintf(buf, "%lu", total); 3839 #ifdef CONFIG_NUMA 3840 for_each_node_state(node, N_NORMAL_MEMORY) 3841 if (nodes[node]) 3842 x += sprintf(buf + x, " N%d=%lu", 3843 node, nodes[node]); 3844 #endif 3845 kfree(nodes); 3846 return x + sprintf(buf + x, "\n"); 3847 } 3848 3849 static int any_slab_objects(struct kmem_cache *s) 3850 { 3851 int node; 3852 3853 for_each_online_node(node) { 3854 struct kmem_cache_node *n = get_node(s, node); 3855 3856 if (!n) 3857 continue; 3858 3859 if (atomic_long_read(&n->total_objects)) 3860 return 1; 3861 } 3862 return 0; 3863 } 3864 3865 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 3866 #define to_slab(n) container_of(n, struct kmem_cache, kobj); 3867 3868 struct slab_attribute { 3869 struct attribute attr; 3870 
ssize_t (*show)(struct kmem_cache *s, char *buf); 3871 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 3872 }; 3873 3874 #define SLAB_ATTR_RO(_name) \ 3875 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 3876 3877 #define SLAB_ATTR(_name) \ 3878 static struct slab_attribute _name##_attr = \ 3879 __ATTR(_name, 0644, _name##_show, _name##_store) 3880 3881 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 3882 { 3883 return sprintf(buf, "%d\n", s->size); 3884 } 3885 SLAB_ATTR_RO(slab_size); 3886 3887 static ssize_t align_show(struct kmem_cache *s, char *buf) 3888 { 3889 return sprintf(buf, "%d\n", s->align); 3890 } 3891 SLAB_ATTR_RO(align); 3892 3893 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 3894 { 3895 return sprintf(buf, "%d\n", s->objsize); 3896 } 3897 SLAB_ATTR_RO(object_size); 3898 3899 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 3900 { 3901 return sprintf(buf, "%d\n", oo_objects(s->oo)); 3902 } 3903 SLAB_ATTR_RO(objs_per_slab); 3904 3905 static ssize_t order_store(struct kmem_cache *s, 3906 const char *buf, size_t length) 3907 { 3908 unsigned long order; 3909 int err; 3910 3911 err = strict_strtoul(buf, 10, &order); 3912 if (err) 3913 return err; 3914 3915 if (order > slub_max_order || order < slub_min_order) 3916 return -EINVAL; 3917 3918 calculate_sizes(s, order); 3919 return length; 3920 } 3921 3922 static ssize_t order_show(struct kmem_cache *s, char *buf) 3923 { 3924 return sprintf(buf, "%d\n", oo_order(s->oo)); 3925 } 3926 SLAB_ATTR(order); 3927 3928 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 3929 { 3930 return sprintf(buf, "%lu\n", s->min_partial); 3931 } 3932 3933 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 3934 size_t length) 3935 { 3936 unsigned long min; 3937 int err; 3938 3939 err = strict_strtoul(buf, 10, &min); 3940 if (err) 3941 return err; 3942 3943 set_min_partial(s, min); 3944 return length; 3945 } 3946 SLAB_ATTR(min_partial); 3947 3948 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 3949 { 3950 if (s->ctor) { 3951 int n = sprint_symbol(buf, (unsigned long)s->ctor); 3952 3953 return n + sprintf(buf + n, "\n"); 3954 } 3955 return 0; 3956 } 3957 SLAB_ATTR_RO(ctor); 3958 3959 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3960 { 3961 return sprintf(buf, "%d\n", s->refcount - 1); 3962 } 3963 SLAB_ATTR_RO(aliases); 3964 3965 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 3966 { 3967 return show_slab_objects(s, buf, SO_ALL); 3968 } 3969 SLAB_ATTR_RO(slabs); 3970 3971 static ssize_t partial_show(struct kmem_cache *s, char *buf) 3972 { 3973 return show_slab_objects(s, buf, SO_PARTIAL); 3974 } 3975 SLAB_ATTR_RO(partial); 3976 3977 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 3978 { 3979 return show_slab_objects(s, buf, SO_CPU); 3980 } 3981 SLAB_ATTR_RO(cpu_slabs); 3982 3983 static ssize_t objects_show(struct kmem_cache *s, char *buf) 3984 { 3985 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 3986 } 3987 SLAB_ATTR_RO(objects); 3988 3989 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 3990 { 3991 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 3992 } 3993 SLAB_ATTR_RO(objects_partial); 3994 3995 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 3996 { 3997 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 3998 } 3999 SLAB_ATTR_RO(total_objects); 4000 4001 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 4002 { 4003 
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 4004 } 4005 4006 static ssize_t sanity_checks_store(struct kmem_cache *s, 4007 const char *buf, size_t length) 4008 { 4009 s->flags &= ~SLAB_DEBUG_FREE; 4010 if (buf[0] == '1') 4011 s->flags |= SLAB_DEBUG_FREE; 4012 return length; 4013 } 4014 SLAB_ATTR(sanity_checks); 4015 4016 static ssize_t trace_show(struct kmem_cache *s, char *buf) 4017 { 4018 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 4019 } 4020 4021 static ssize_t trace_store(struct kmem_cache *s, const char *buf, 4022 size_t length) 4023 { 4024 s->flags &= ~SLAB_TRACE; 4025 if (buf[0] == '1') 4026 s->flags |= SLAB_TRACE; 4027 return length; 4028 } 4029 SLAB_ATTR(trace); 4030 4031 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 4032 { 4033 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 4034 } 4035 4036 static ssize_t reclaim_account_store(struct kmem_cache *s, 4037 const char *buf, size_t length) 4038 { 4039 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 4040 if (buf[0] == '1') 4041 s->flags |= SLAB_RECLAIM_ACCOUNT; 4042 return length; 4043 } 4044 SLAB_ATTR(reclaim_account); 4045 4046 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 4047 { 4048 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 4049 } 4050 SLAB_ATTR_RO(hwcache_align); 4051 4052 #ifdef CONFIG_ZONE_DMA 4053 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 4054 { 4055 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 4056 } 4057 SLAB_ATTR_RO(cache_dma); 4058 #endif 4059 4060 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 4061 { 4062 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 4063 } 4064 SLAB_ATTR_RO(destroy_by_rcu); 4065 4066 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 4067 { 4068 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 4069 } 4070 4071 static ssize_t red_zone_store(struct kmem_cache *s, 4072 const char *buf, size_t length) 4073 { 4074 if (any_slab_objects(s)) 4075 return -EBUSY; 4076 4077 s->flags &= ~SLAB_RED_ZONE; 4078 if (buf[0] == '1') 4079 s->flags |= SLAB_RED_ZONE; 4080 calculate_sizes(s, -1); 4081 return length; 4082 } 4083 SLAB_ATTR(red_zone); 4084 4085 static ssize_t poison_show(struct kmem_cache *s, char *buf) 4086 { 4087 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 4088 } 4089 4090 static ssize_t poison_store(struct kmem_cache *s, 4091 const char *buf, size_t length) 4092 { 4093 if (any_slab_objects(s)) 4094 return -EBUSY; 4095 4096 s->flags &= ~SLAB_POISON; 4097 if (buf[0] == '1') 4098 s->flags |= SLAB_POISON; 4099 calculate_sizes(s, -1); 4100 return length; 4101 } 4102 SLAB_ATTR(poison); 4103 4104 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 4105 { 4106 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 4107 } 4108 4109 static ssize_t store_user_store(struct kmem_cache *s, 4110 const char *buf, size_t length) 4111 { 4112 if (any_slab_objects(s)) 4113 return -EBUSY; 4114 4115 s->flags &= ~SLAB_STORE_USER; 4116 if (buf[0] == '1') 4117 s->flags |= SLAB_STORE_USER; 4118 calculate_sizes(s, -1); 4119 return length; 4120 } 4121 SLAB_ATTR(store_user); 4122 4123 static ssize_t validate_show(struct kmem_cache *s, char *buf) 4124 { 4125 return 0; 4126 } 4127 4128 static ssize_t validate_store(struct kmem_cache *s, 4129 const char *buf, size_t length) 4130 { 4131 int ret = -EINVAL; 4132 4133 if (buf[0] == '1') { 4134 ret = validate_slab_cache(s); 4135 if (ret >= 0) 4136 ret = length; 4137 } 
4138 return ret; 4139 } 4140 SLAB_ATTR(validate); 4141 4142 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4143 { 4144 return 0; 4145 } 4146 4147 static ssize_t shrink_store(struct kmem_cache *s, 4148 const char *buf, size_t length) 4149 { 4150 if (buf[0] == '1') { 4151 int rc = kmem_cache_shrink(s); 4152 4153 if (rc) 4154 return rc; 4155 } else 4156 return -EINVAL; 4157 return length; 4158 } 4159 SLAB_ATTR(shrink); 4160 4161 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 4162 { 4163 if (!(s->flags & SLAB_STORE_USER)) 4164 return -ENOSYS; 4165 return list_locations(s, buf, TRACK_ALLOC); 4166 } 4167 SLAB_ATTR_RO(alloc_calls); 4168 4169 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 4170 { 4171 if (!(s->flags & SLAB_STORE_USER)) 4172 return -ENOSYS; 4173 return list_locations(s, buf, TRACK_FREE); 4174 } 4175 SLAB_ATTR_RO(free_calls); 4176 4177 #ifdef CONFIG_NUMA 4178 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4179 { 4180 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 4181 } 4182 4183 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 4184 const char *buf, size_t length) 4185 { 4186 unsigned long ratio; 4187 int err; 4188 4189 err = strict_strtoul(buf, 10, &ratio); 4190 if (err) 4191 return err; 4192 4193 if (ratio <= 100) 4194 s->remote_node_defrag_ratio = ratio * 10; 4195 4196 return length; 4197 } 4198 SLAB_ATTR(remote_node_defrag_ratio); 4199 #endif 4200 4201 #ifdef CONFIG_SLUB_STATS 4202 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 4203 { 4204 unsigned long sum = 0; 4205 int cpu; 4206 int len; 4207 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 4208 4209 if (!data) 4210 return -ENOMEM; 4211 4212 for_each_online_cpu(cpu) { 4213 unsigned x = get_cpu_slab(s, cpu)->stat[si]; 4214 4215 data[cpu] = x; 4216 sum += x; 4217 } 4218 4219 len = sprintf(buf, "%lu", sum); 4220 4221 #ifdef CONFIG_SMP 4222 for_each_online_cpu(cpu) { 4223 if (data[cpu] && len < PAGE_SIZE - 20) 4224 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 4225 } 4226 #endif 4227 kfree(data); 4228 return len + sprintf(buf + len, "\n"); 4229 } 4230 4231 #define STAT_ATTR(si, text) \ 4232 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 4233 { \ 4234 return show_stat(s, buf, si); \ 4235 } \ 4236 SLAB_ATTR_RO(text); \ 4237 4238 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 4239 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 4240 STAT_ATTR(FREE_FASTPATH, free_fastpath); 4241 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 4242 STAT_ATTR(FREE_FROZEN, free_frozen); 4243 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 4244 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 4245 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 4246 STAT_ATTR(ALLOC_SLAB, alloc_slab); 4247 STAT_ATTR(ALLOC_REFILL, alloc_refill); 4248 STAT_ATTR(FREE_SLAB, free_slab); 4249 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 4250 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 4251 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 4252 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 4253 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 4254 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 4255 STAT_ATTR(ORDER_FALLBACK, order_fallback); 4256 #endif 4257 4258 static struct attribute *slab_attrs[] = { 4259 &slab_size_attr.attr, 4260 &object_size_attr.attr, 4261 &objs_per_slab_attr.attr, 4262 &order_attr.attr, 4263 &min_partial_attr.attr, 4264 &objects_attr.attr, 4265 &objects_partial_attr.attr, 4266 
static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&min_partial_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&shrink_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&order_fallback_attr.attr,
#endif
	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
}

static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s);
}

static struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
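
/*
 * For illustration (object sizes are made up): with the format above a
 * plain 192 byte cache yields ":0000192", a DMA cache of the same size
 * yields ":d-0000192", and a reclaim-accounted cache with sanity checking
 * enabled and 4096 byte objects yields ":aF-0004096".
 */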
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		return err;
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}
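
/*
 * Resulting sysfs layout (example only; the names and id below are
 * hypothetical): an unmergeable cache appears under its own name, while
 * merged caches share one kobject named by create_unique_id() and are
 * reachable through the symlinks created by sysfs_slab_alias():
 *
 *	/sys/kernel/slab/:0000192
 *	/sys/kernel/slab/kmalloc-192 -> :0000192
 *	/sys/kernel/slab/some_cache  -> :0000192
 */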
static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		"<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	nr_inuse = nr_objs - nr_free;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, oo_objects(s->oo),
		   (1 << oo_order(s->oo)));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}
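
/*
 * Sample /proc/slabinfo line as emitted by s_show() above (the numbers
 * are invented for illustration). SLUB has no per-cache tunables and no
 * shared array, so the tunables and <sharedavail> columns are always 0:
 *
 *	kmalloc-64        12800  12800     64   64    1 : tunables    0    0    0 : slabdata    200    200      0
 */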
static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IWUSR | S_IRUGO, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */