/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>

#include <trace/events/kmem.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. node->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations or frees can be performed
 * on the objects in the slab, nor can the slab be added to or removed
 * from the partial or full lists, since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor may the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. E.g.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that, objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If the trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So we go on to the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup are
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DEBUG_FREE)

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
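 *
 * An illustrative (hypothetical) example of that effect: a cache of
 * 2048-byte objects packs two objects into an order-0 (4 KiB) slab.
 * Enabling red zoning and SLAB_STORE_USER adds a red zone word plus two
 * struct track records per object, so a debugged object no longer fits
 * twice into 4 KiB and the cache would have to move to a higher order to
 * keep the same object count. Booting with slub_debug=O skips these
 * metadata-heavy flags for such caches.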
154 */ 155 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 156 157 /* 158 * Set of flags that will prevent slab merging 159 */ 160 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 161 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ 162 SLAB_FAILSLAB) 163 164 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 165 SLAB_CACHE_DMA | SLAB_NOTRACK) 166 167 #define OO_SHIFT 16 168 #define OO_MASK ((1 << OO_SHIFT) - 1) 169 #define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */ 170 171 /* Internal SLUB flags */ 172 #define __OBJECT_POISON 0x80000000UL /* Poison object */ 173 174 static int kmem_size = sizeof(struct kmem_cache); 175 176 #ifdef CONFIG_SMP 177 static struct notifier_block slab_notifier; 178 #endif 179 180 static enum { 181 DOWN, /* No slab functionality available */ 182 PARTIAL, /* Kmem_cache_node works */ 183 UP, /* Everything works but does not show up in sysfs */ 184 SYSFS /* Sysfs up */ 185 } slab_state = DOWN; 186 187 /* A list of all slab caches on the system */ 188 static DECLARE_RWSEM(slub_lock); 189 static LIST_HEAD(slab_caches); 190 191 /* 192 * Tracking user of a slab. 193 */ 194 struct track { 195 unsigned long addr; /* Called from address */ 196 int cpu; /* Was running on cpu */ 197 int pid; /* Pid context */ 198 unsigned long when; /* When did the operation occur */ 199 }; 200 201 enum track_item { TRACK_ALLOC, TRACK_FREE }; 202 203 #ifdef CONFIG_SYSFS 204 static int sysfs_slab_add(struct kmem_cache *); 205 static int sysfs_slab_alias(struct kmem_cache *, const char *); 206 static void sysfs_slab_remove(struct kmem_cache *); 207 208 #else 209 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 210 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 211 { return 0; } 212 static inline void sysfs_slab_remove(struct kmem_cache *s) 213 { 214 kfree(s->name); 215 kfree(s); 216 } 217 218 #endif 219 220 static inline void stat(const struct kmem_cache *s, enum stat_item si) 221 { 222 #ifdef CONFIG_SLUB_STATS 223 __this_cpu_inc(s->cpu_slab->stat[si]); 224 #endif 225 } 226 227 /******************************************************************** 228 * Core slab cache functions 229 *******************************************************************/ 230 231 int slab_is_available(void) 232 { 233 return slab_state >= UP; 234 } 235 236 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 237 { 238 return s->node[node]; 239 } 240 241 /* Verify that a pointer has an address that is valid within a slab page */ 242 static inline int check_valid_pointer(struct kmem_cache *s, 243 struct page *page, const void *object) 244 { 245 void *base; 246 247 if (!object) 248 return 1; 249 250 base = page_address(page); 251 if (object < base || object >= base + page->objects * s->size || 252 (object - base) % s->size) { 253 return 0; 254 } 255 256 return 1; 257 } 258 259 static inline void *get_freepointer(struct kmem_cache *s, void *object) 260 { 261 return *(void **)(object + s->offset); 262 } 263 264 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) 265 { 266 void *p; 267 268 #ifdef CONFIG_DEBUG_PAGEALLOC 269 probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p)); 270 #else 271 p = get_freepointer(s, object); 272 #endif 273 return p; 274 } 275 276 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 277 { 278 *(void **)(object + s->offset) = fp; 279 } 280 281 /* Loop over all objects in 
a slab */ 282 #define for_each_object(__p, __s, __addr, __objects) \ 283 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ 284 __p += (__s)->size) 285 286 /* Determine object index from a given position */ 287 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 288 { 289 return (p - addr) / s->size; 290 } 291 292 static inline size_t slab_ksize(const struct kmem_cache *s) 293 { 294 #ifdef CONFIG_SLUB_DEBUG 295 /* 296 * Debugging requires use of the padding between object 297 * and whatever may come after it. 298 */ 299 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 300 return s->objsize; 301 302 #endif 303 /* 304 * If we have the need to store the freelist pointer 305 * back there or track user information then we can 306 * only use the space before that information. 307 */ 308 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 309 return s->inuse; 310 /* 311 * Else we can use all the padding etc for the allocation 312 */ 313 return s->size; 314 } 315 316 static inline int order_objects(int order, unsigned long size, int reserved) 317 { 318 return ((PAGE_SIZE << order) - reserved) / size; 319 } 320 321 static inline struct kmem_cache_order_objects oo_make(int order, 322 unsigned long size, int reserved) 323 { 324 struct kmem_cache_order_objects x = { 325 (order << OO_SHIFT) + order_objects(order, size, reserved) 326 }; 327 328 return x; 329 } 330 331 static inline int oo_order(struct kmem_cache_order_objects x) 332 { 333 return x.x >> OO_SHIFT; 334 } 335 336 static inline int oo_objects(struct kmem_cache_order_objects x) 337 { 338 return x.x & OO_MASK; 339 } 340 341 #ifdef CONFIG_SLUB_DEBUG 342 /* 343 * Determine a map of object in use on a page. 344 * 345 * Slab lock or node listlock must be held to guarantee that the page does 346 * not vanish from under us. 347 */ 348 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) 349 { 350 void *p; 351 void *addr = page_address(page); 352 353 for (p = page->freelist; p; p = get_freepointer(s, p)) 354 set_bit(slab_index(p, s, addr), map); 355 } 356 357 /* 358 * Debug settings: 359 */ 360 #ifdef CONFIG_SLUB_DEBUG_ON 361 static int slub_debug = DEBUG_DEFAULT_FLAGS; 362 #else 363 static int slub_debug; 364 #endif 365 366 static char *slub_debug_slabs; 367 static int disable_higher_order_debug; 368 369 /* 370 * Object debugging 371 */ 372 static void print_section(char *text, u8 *addr, unsigned int length) 373 { 374 int i, offset; 375 int newline = 1; 376 char ascii[17]; 377 378 ascii[16] = 0; 379 380 for (i = 0; i < length; i++) { 381 if (newline) { 382 printk(KERN_ERR "%8s 0x%p: ", text, addr + i); 383 newline = 0; 384 } 385 printk(KERN_CONT " %02x", addr[i]); 386 offset = i % 16; 387 ascii[offset] = isgraph(addr[i]) ? 
addr[i] : '.'; 388 if (offset == 15) { 389 printk(KERN_CONT " %s\n", ascii); 390 newline = 1; 391 } 392 } 393 if (!newline) { 394 i %= 16; 395 while (i < 16) { 396 printk(KERN_CONT " "); 397 ascii[i] = ' '; 398 i++; 399 } 400 printk(KERN_CONT " %s\n", ascii); 401 } 402 } 403 404 static struct track *get_track(struct kmem_cache *s, void *object, 405 enum track_item alloc) 406 { 407 struct track *p; 408 409 if (s->offset) 410 p = object + s->offset + sizeof(void *); 411 else 412 p = object + s->inuse; 413 414 return p + alloc; 415 } 416 417 static void set_track(struct kmem_cache *s, void *object, 418 enum track_item alloc, unsigned long addr) 419 { 420 struct track *p = get_track(s, object, alloc); 421 422 if (addr) { 423 p->addr = addr; 424 p->cpu = smp_processor_id(); 425 p->pid = current->pid; 426 p->when = jiffies; 427 } else 428 memset(p, 0, sizeof(struct track)); 429 } 430 431 static void init_tracking(struct kmem_cache *s, void *object) 432 { 433 if (!(s->flags & SLAB_STORE_USER)) 434 return; 435 436 set_track(s, object, TRACK_FREE, 0UL); 437 set_track(s, object, TRACK_ALLOC, 0UL); 438 } 439 440 static void print_track(const char *s, struct track *t) 441 { 442 if (!t->addr) 443 return; 444 445 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 446 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); 447 } 448 449 static void print_tracking(struct kmem_cache *s, void *object) 450 { 451 if (!(s->flags & SLAB_STORE_USER)) 452 return; 453 454 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); 455 print_track("Freed", get_track(s, object, TRACK_FREE)); 456 } 457 458 static void print_page_info(struct page *page) 459 { 460 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", 461 page, page->objects, page->inuse, page->freelist, page->flags); 462 463 } 464 465 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 466 { 467 va_list args; 468 char buf[100]; 469 470 va_start(args, fmt); 471 vsnprintf(buf, sizeof(buf), fmt, args); 472 va_end(args); 473 printk(KERN_ERR "========================================" 474 "=====================================\n"); 475 printk(KERN_ERR "BUG %s: %s\n", s->name, buf); 476 printk(KERN_ERR "----------------------------------------" 477 "-------------------------------------\n\n"); 478 } 479 480 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 
481 { 482 va_list args; 483 char buf[100]; 484 485 va_start(args, fmt); 486 vsnprintf(buf, sizeof(buf), fmt, args); 487 va_end(args); 488 printk(KERN_ERR "FIX %s: %s\n", s->name, buf); 489 } 490 491 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 492 { 493 unsigned int off; /* Offset of last byte */ 494 u8 *addr = page_address(page); 495 496 print_tracking(s, p); 497 498 print_page_info(page); 499 500 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", 501 p, p - addr, get_freepointer(s, p)); 502 503 if (p > addr + 16) 504 print_section("Bytes b4", p - 16, 16); 505 506 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE)); 507 508 if (s->flags & SLAB_RED_ZONE) 509 print_section("Redzone", p + s->objsize, 510 s->inuse - s->objsize); 511 512 if (s->offset) 513 off = s->offset + sizeof(void *); 514 else 515 off = s->inuse; 516 517 if (s->flags & SLAB_STORE_USER) 518 off += 2 * sizeof(struct track); 519 520 if (off != s->size) 521 /* Beginning of the filler is the free pointer */ 522 print_section("Padding", p + off, s->size - off); 523 524 dump_stack(); 525 } 526 527 static void object_err(struct kmem_cache *s, struct page *page, 528 u8 *object, char *reason) 529 { 530 slab_bug(s, "%s", reason); 531 print_trailer(s, page, object); 532 } 533 534 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 535 { 536 va_list args; 537 char buf[100]; 538 539 va_start(args, fmt); 540 vsnprintf(buf, sizeof(buf), fmt, args); 541 va_end(args); 542 slab_bug(s, "%s", buf); 543 print_page_info(page); 544 dump_stack(); 545 } 546 547 static void init_object(struct kmem_cache *s, void *object, u8 val) 548 { 549 u8 *p = object; 550 551 if (s->flags & __OBJECT_POISON) { 552 memset(p, POISON_FREE, s->objsize - 1); 553 p[s->objsize - 1] = POISON_END; 554 } 555 556 if (s->flags & SLAB_RED_ZONE) 557 memset(p + s->objsize, val, s->inuse - s->objsize); 558 } 559 560 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) 561 { 562 while (bytes) { 563 if (*start != (u8)value) 564 return start; 565 start++; 566 bytes--; 567 } 568 return NULL; 569 } 570 571 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 572 void *from, void *to) 573 { 574 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); 575 memset(from, data, to - from); 576 } 577 578 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 579 u8 *object, char *what, 580 u8 *start, unsigned int value, unsigned int bytes) 581 { 582 u8 *fault; 583 u8 *end; 584 585 fault = check_bytes(start, value, bytes); 586 if (!fault) 587 return 1; 588 589 end = start + bytes; 590 while (end > fault && end[-1] == value) 591 end--; 592 593 slab_bug(s, "%s overwritten", what); 594 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n", 595 fault, end - 1, fault[0], value); 596 print_trailer(s, page, object); 597 598 restore_bytes(s, what, value, fault, end); 599 return 0; 600 } 601 602 /* 603 * Object layout: 604 * 605 * object address 606 * Bytes of the object to be managed. 607 * If the freepointer may overlay the object then the free 608 * pointer is the first word of the object. 609 * 610 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 611 * 0xa5 (POISON_END) 612 * 613 * object + s->objsize 614 * Padding to reach word boundary. This is also used for Redzoning. 615 * Padding is extended by another word if Redzoning is enabled and 616 * objsize == inuse. 
617 * 618 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 619 * 0xcc (RED_ACTIVE) for objects in use. 620 * 621 * object + s->inuse 622 * Meta data starts here. 623 * 624 * A. Free pointer (if we cannot overwrite object on free) 625 * B. Tracking data for SLAB_STORE_USER 626 * C. Padding to reach required alignment boundary or at mininum 627 * one word if debugging is on to be able to detect writes 628 * before the word boundary. 629 * 630 * Padding is done using 0x5a (POISON_INUSE) 631 * 632 * object + s->size 633 * Nothing is used beyond s->size. 634 * 635 * If slabcaches are merged then the objsize and inuse boundaries are mostly 636 * ignored. And therefore no slab options that rely on these boundaries 637 * may be used with merged slabcaches. 638 */ 639 640 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 641 { 642 unsigned long off = s->inuse; /* The end of info */ 643 644 if (s->offset) 645 /* Freepointer is placed after the object. */ 646 off += sizeof(void *); 647 648 if (s->flags & SLAB_STORE_USER) 649 /* We also have user information there */ 650 off += 2 * sizeof(struct track); 651 652 if (s->size == off) 653 return 1; 654 655 return check_bytes_and_report(s, page, p, "Object padding", 656 p + off, POISON_INUSE, s->size - off); 657 } 658 659 /* Check the pad bytes at the end of a slab page */ 660 static int slab_pad_check(struct kmem_cache *s, struct page *page) 661 { 662 u8 *start; 663 u8 *fault; 664 u8 *end; 665 int length; 666 int remainder; 667 668 if (!(s->flags & SLAB_POISON)) 669 return 1; 670 671 start = page_address(page); 672 length = (PAGE_SIZE << compound_order(page)) - s->reserved; 673 end = start + length; 674 remainder = length % s->size; 675 if (!remainder) 676 return 1; 677 678 fault = check_bytes(end - remainder, POISON_INUSE, remainder); 679 if (!fault) 680 return 1; 681 while (end > fault && end[-1] == POISON_INUSE) 682 end--; 683 684 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); 685 print_section("Padding", end - remainder, remainder); 686 687 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); 688 return 0; 689 } 690 691 static int check_object(struct kmem_cache *s, struct page *page, 692 void *object, u8 val) 693 { 694 u8 *p = object; 695 u8 *endobject = object + s->objsize; 696 697 if (s->flags & SLAB_RED_ZONE) { 698 if (!check_bytes_and_report(s, page, object, "Redzone", 699 endobject, val, s->inuse - s->objsize)) 700 return 0; 701 } else { 702 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { 703 check_bytes_and_report(s, page, p, "Alignment padding", 704 endobject, POISON_INUSE, s->inuse - s->objsize); 705 } 706 } 707 708 if (s->flags & SLAB_POISON) { 709 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && 710 (!check_bytes_and_report(s, page, p, "Poison", p, 711 POISON_FREE, s->objsize - 1) || 712 !check_bytes_and_report(s, page, p, "Poison", 713 p + s->objsize - 1, POISON_END, 1))) 714 return 0; 715 /* 716 * check_pad_bytes cleans up on its own. 717 */ 718 check_pad_bytes(s, page, p); 719 } 720 721 if (!s->offset && val == SLUB_RED_ACTIVE) 722 /* 723 * Object and freepointer overlap. Cannot check 724 * freepointer while object is allocated. 725 */ 726 return 1; 727 728 /* Check free pointer validity */ 729 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 730 object_err(s, page, p, "Freepointer corrupt"); 731 /* 732 * No choice but to zap it and thus lose the remainder 733 * of the free objects in this slab. 
May cause 734 * another error because the object count is now wrong. 735 */ 736 set_freepointer(s, p, NULL); 737 return 0; 738 } 739 return 1; 740 } 741 742 static int check_slab(struct kmem_cache *s, struct page *page) 743 { 744 int maxobj; 745 746 VM_BUG_ON(!irqs_disabled()); 747 748 if (!PageSlab(page)) { 749 slab_err(s, page, "Not a valid slab page"); 750 return 0; 751 } 752 753 maxobj = order_objects(compound_order(page), s->size, s->reserved); 754 if (page->objects > maxobj) { 755 slab_err(s, page, "objects %u > max %u", 756 s->name, page->objects, maxobj); 757 return 0; 758 } 759 if (page->inuse > page->objects) { 760 slab_err(s, page, "inuse %u > max %u", 761 s->name, page->inuse, page->objects); 762 return 0; 763 } 764 /* Slab_pad_check fixes things up after itself */ 765 slab_pad_check(s, page); 766 return 1; 767 } 768 769 /* 770 * Determine if a certain object on a page is on the freelist. Must hold the 771 * slab lock to guarantee that the chains are in a consistent state. 772 */ 773 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 774 { 775 int nr = 0; 776 void *fp = page->freelist; 777 void *object = NULL; 778 unsigned long max_objects; 779 780 while (fp && nr <= page->objects) { 781 if (fp == search) 782 return 1; 783 if (!check_valid_pointer(s, page, fp)) { 784 if (object) { 785 object_err(s, page, object, 786 "Freechain corrupt"); 787 set_freepointer(s, object, NULL); 788 break; 789 } else { 790 slab_err(s, page, "Freepointer corrupt"); 791 page->freelist = NULL; 792 page->inuse = page->objects; 793 slab_fix(s, "Freelist cleared"); 794 return 0; 795 } 796 break; 797 } 798 object = fp; 799 fp = get_freepointer(s, object); 800 nr++; 801 } 802 803 max_objects = order_objects(compound_order(page), s->size, s->reserved); 804 if (max_objects > MAX_OBJS_PER_PAGE) 805 max_objects = MAX_OBJS_PER_PAGE; 806 807 if (page->objects != max_objects) { 808 slab_err(s, page, "Wrong number of objects. Found %d but " 809 "should be %d", page->objects, max_objects); 810 page->objects = max_objects; 811 slab_fix(s, "Number of objects adjusted."); 812 } 813 if (page->inuse != page->objects - nr) { 814 slab_err(s, page, "Wrong object count. Counter is %d but " 815 "counted were %d", page->inuse, page->objects - nr); 816 page->inuse = page->objects - nr; 817 slab_fix(s, "Object count adjusted."); 818 } 819 return search == NULL; 820 } 821 822 static void trace(struct kmem_cache *s, struct page *page, void *object, 823 int alloc) 824 { 825 if (s->flags & SLAB_TRACE) { 826 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 827 s->name, 828 alloc ? "alloc" : "free", 829 object, page->inuse, 830 page->freelist); 831 832 if (!alloc) 833 print_section("Object", (void *)object, s->objsize); 834 835 dump_stack(); 836 } 837 } 838 839 /* 840 * Hooks for other subsystems that check memory allocations. In a typical 841 * production configuration these hooks all should produce no code at all. 
842 */ 843 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 844 { 845 flags &= gfp_allowed_mask; 846 lockdep_trace_alloc(flags); 847 might_sleep_if(flags & __GFP_WAIT); 848 849 return should_failslab(s->objsize, flags, s->flags); 850 } 851 852 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object) 853 { 854 flags &= gfp_allowed_mask; 855 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); 856 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags); 857 } 858 859 static inline void slab_free_hook(struct kmem_cache *s, void *x) 860 { 861 kmemleak_free_recursive(x, s->flags); 862 863 /* 864 * Trouble is that we may no longer disable interupts in the fast path 865 * So in order to make the debug calls that expect irqs to be 866 * disabled we need to disable interrupts temporarily. 867 */ 868 #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP) 869 { 870 unsigned long flags; 871 872 local_irq_save(flags); 873 kmemcheck_slab_free(s, x, s->objsize); 874 debug_check_no_locks_freed(x, s->objsize); 875 local_irq_restore(flags); 876 } 877 #endif 878 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 879 debug_check_no_obj_freed(x, s->objsize); 880 } 881 882 /* 883 * Tracking of fully allocated slabs for debugging purposes. 884 */ 885 static void add_full(struct kmem_cache_node *n, struct page *page) 886 { 887 spin_lock(&n->list_lock); 888 list_add(&page->lru, &n->full); 889 spin_unlock(&n->list_lock); 890 } 891 892 static void remove_full(struct kmem_cache *s, struct page *page) 893 { 894 struct kmem_cache_node *n; 895 896 if (!(s->flags & SLAB_STORE_USER)) 897 return; 898 899 n = get_node(s, page_to_nid(page)); 900 901 spin_lock(&n->list_lock); 902 list_del(&page->lru); 903 spin_unlock(&n->list_lock); 904 } 905 906 /* Tracking of the number of slabs for debugging purposes */ 907 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 908 { 909 struct kmem_cache_node *n = get_node(s, node); 910 911 return atomic_long_read(&n->nr_slabs); 912 } 913 914 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 915 { 916 return atomic_long_read(&n->nr_slabs); 917 } 918 919 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 920 { 921 struct kmem_cache_node *n = get_node(s, node); 922 923 /* 924 * May be called early in order to allocate a slab for the 925 * kmem_cache_node structure. Solve the chicken-egg 926 * dilemma by deferring the increment of the count during 927 * bootstrap (see early_kmem_cache_node_alloc). 
928 */ 929 if (n) { 930 atomic_long_inc(&n->nr_slabs); 931 atomic_long_add(objects, &n->total_objects); 932 } 933 } 934 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 935 { 936 struct kmem_cache_node *n = get_node(s, node); 937 938 atomic_long_dec(&n->nr_slabs); 939 atomic_long_sub(objects, &n->total_objects); 940 } 941 942 /* Object debug checks for alloc/free paths */ 943 static void setup_object_debug(struct kmem_cache *s, struct page *page, 944 void *object) 945 { 946 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 947 return; 948 949 init_object(s, object, SLUB_RED_INACTIVE); 950 init_tracking(s, object); 951 } 952 953 static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page, 954 void *object, unsigned long addr) 955 { 956 if (!check_slab(s, page)) 957 goto bad; 958 959 if (!on_freelist(s, page, object)) { 960 object_err(s, page, object, "Object already allocated"); 961 goto bad; 962 } 963 964 if (!check_valid_pointer(s, page, object)) { 965 object_err(s, page, object, "Freelist Pointer check fails"); 966 goto bad; 967 } 968 969 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) 970 goto bad; 971 972 /* Success perform special debug activities for allocs */ 973 if (s->flags & SLAB_STORE_USER) 974 set_track(s, object, TRACK_ALLOC, addr); 975 trace(s, page, object, 1); 976 init_object(s, object, SLUB_RED_ACTIVE); 977 return 1; 978 979 bad: 980 if (PageSlab(page)) { 981 /* 982 * If this is a slab page then lets do the best we can 983 * to avoid issues in the future. Marking all objects 984 * as used avoids touching the remaining objects. 985 */ 986 slab_fix(s, "Marking all objects used"); 987 page->inuse = page->objects; 988 page->freelist = NULL; 989 } 990 return 0; 991 } 992 993 static noinline int free_debug_processing(struct kmem_cache *s, 994 struct page *page, void *object, unsigned long addr) 995 { 996 if (!check_slab(s, page)) 997 goto fail; 998 999 if (!check_valid_pointer(s, page, object)) { 1000 slab_err(s, page, "Invalid object pointer 0x%p", object); 1001 goto fail; 1002 } 1003 1004 if (on_freelist(s, page, object)) { 1005 object_err(s, page, object, "Object already free"); 1006 goto fail; 1007 } 1008 1009 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) 1010 return 0; 1011 1012 if (unlikely(s != page->slab)) { 1013 if (!PageSlab(page)) { 1014 slab_err(s, page, "Attempt to free object(0x%p) " 1015 "outside of slab", object); 1016 } else if (!page->slab) { 1017 printk(KERN_ERR 1018 "SLUB <none>: no slab for object 0x%p.\n", 1019 object); 1020 dump_stack(); 1021 } else 1022 object_err(s, page, object, 1023 "page slab pointer corrupt."); 1024 goto fail; 1025 } 1026 1027 /* Special debug activities for freeing objects */ 1028 if (!PageSlubFrozen(page) && !page->freelist) 1029 remove_full(s, page); 1030 if (s->flags & SLAB_STORE_USER) 1031 set_track(s, object, TRACK_FREE, addr); 1032 trace(s, page, object, 0); 1033 init_object(s, object, SLUB_RED_INACTIVE); 1034 return 1; 1035 1036 fail: 1037 slab_fix(s, "Object at 0x%p not freed", object); 1038 return 0; 1039 } 1040 1041 static int __init setup_slub_debug(char *str) 1042 { 1043 slub_debug = DEBUG_DEFAULT_FLAGS; 1044 if (*str++ != '=' || !*str) 1045 /* 1046 * No options specified. Switch on full debugging. 1047 */ 1048 goto out; 1049 1050 if (*str == ',') 1051 /* 1052 * No options but restriction on slabs. This means full 1053 * debugging for slabs matching a pattern. 
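		 *
		 * Illustrative boot-line examples of this and the other
		 * forms parsed below (cache names are illustrative):
		 *   slub_debug              - enable the default debug options
		 *   slub_debug=,dentry      - full debugging, but only for caches
		 *                             whose name starts with "dentry"
		 *   slub_debug=FZ           - SLAB_DEBUG_FREE consistency checks
		 *                             plus red zoning for all caches
		 *   slub_debug=P,kmalloc-64 - poisoning for the kmalloc-64 cache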
1054 */ 1055 goto check_slabs; 1056 1057 if (tolower(*str) == 'o') { 1058 /* 1059 * Avoid enabling debugging on caches if its minimum order 1060 * would increase as a result. 1061 */ 1062 disable_higher_order_debug = 1; 1063 goto out; 1064 } 1065 1066 slub_debug = 0; 1067 if (*str == '-') 1068 /* 1069 * Switch off all debugging measures. 1070 */ 1071 goto out; 1072 1073 /* 1074 * Determine which debug features should be switched on 1075 */ 1076 for (; *str && *str != ','; str++) { 1077 switch (tolower(*str)) { 1078 case 'f': 1079 slub_debug |= SLAB_DEBUG_FREE; 1080 break; 1081 case 'z': 1082 slub_debug |= SLAB_RED_ZONE; 1083 break; 1084 case 'p': 1085 slub_debug |= SLAB_POISON; 1086 break; 1087 case 'u': 1088 slub_debug |= SLAB_STORE_USER; 1089 break; 1090 case 't': 1091 slub_debug |= SLAB_TRACE; 1092 break; 1093 case 'a': 1094 slub_debug |= SLAB_FAILSLAB; 1095 break; 1096 default: 1097 printk(KERN_ERR "slub_debug option '%c' " 1098 "unknown. skipped\n", *str); 1099 } 1100 } 1101 1102 check_slabs: 1103 if (*str == ',') 1104 slub_debug_slabs = str + 1; 1105 out: 1106 return 1; 1107 } 1108 1109 __setup("slub_debug", setup_slub_debug); 1110 1111 static unsigned long kmem_cache_flags(unsigned long objsize, 1112 unsigned long flags, const char *name, 1113 void (*ctor)(void *)) 1114 { 1115 /* 1116 * Enable debugging if selected on the kernel commandline. 1117 */ 1118 if (slub_debug && (!slub_debug_slabs || 1119 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))) 1120 flags |= slub_debug; 1121 1122 return flags; 1123 } 1124 #else 1125 static inline void setup_object_debug(struct kmem_cache *s, 1126 struct page *page, void *object) {} 1127 1128 static inline int alloc_debug_processing(struct kmem_cache *s, 1129 struct page *page, void *object, unsigned long addr) { return 0; } 1130 1131 static inline int free_debug_processing(struct kmem_cache *s, 1132 struct page *page, void *object, unsigned long addr) { return 0; } 1133 1134 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1135 { return 1; } 1136 static inline int check_object(struct kmem_cache *s, struct page *page, 1137 void *object, u8 val) { return 1; } 1138 static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 1139 static inline unsigned long kmem_cache_flags(unsigned long objsize, 1140 unsigned long flags, const char *name, 1141 void (*ctor)(void *)) 1142 { 1143 return flags; 1144 } 1145 #define slub_debug 0 1146 1147 #define disable_higher_order_debug 0 1148 1149 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1150 { return 0; } 1151 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1152 { return 0; } 1153 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1154 int objects) {} 1155 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1156 int objects) {} 1157 1158 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 1159 { return 0; } 1160 1161 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, 1162 void *object) {} 1163 1164 static inline void slab_free_hook(struct kmem_cache *s, void *x) {} 1165 1166 #endif /* CONFIG_SLUB_DEBUG */ 1167 1168 /* 1169 * Slab allocation and freeing 1170 */ 1171 static inline struct page *alloc_slab_page(gfp_t flags, int node, 1172 struct kmem_cache_order_objects oo) 1173 { 1174 int order = oo_order(oo); 1175 1176 flags |= __GFP_NOTRACK; 1177 1178 if (node == NUMA_NO_NODE) 1179 return alloc_pages(flags, order); 1180 else 1181 return 
alloc_pages_exact_node(node, flags, order); 1182 } 1183 1184 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1185 { 1186 struct page *page; 1187 struct kmem_cache_order_objects oo = s->oo; 1188 gfp_t alloc_gfp; 1189 1190 flags |= s->allocflags; 1191 1192 /* 1193 * Let the initial higher-order allocation fail under memory pressure 1194 * so we fall-back to the minimum order allocation. 1195 */ 1196 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 1197 1198 page = alloc_slab_page(alloc_gfp, node, oo); 1199 if (unlikely(!page)) { 1200 oo = s->min; 1201 /* 1202 * Allocation may have failed due to fragmentation. 1203 * Try a lower order alloc if possible 1204 */ 1205 page = alloc_slab_page(flags, node, oo); 1206 if (!page) 1207 return NULL; 1208 1209 stat(s, ORDER_FALLBACK); 1210 } 1211 1212 if (kmemcheck_enabled 1213 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { 1214 int pages = 1 << oo_order(oo); 1215 1216 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node); 1217 1218 /* 1219 * Objects from caches that have a constructor don't get 1220 * cleared when they're allocated, so we need to do it here. 1221 */ 1222 if (s->ctor) 1223 kmemcheck_mark_uninitialized_pages(page, pages); 1224 else 1225 kmemcheck_mark_unallocated_pages(page, pages); 1226 } 1227 1228 page->objects = oo_objects(oo); 1229 mod_zone_page_state(page_zone(page), 1230 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 1231 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1232 1 << oo_order(oo)); 1233 1234 return page; 1235 } 1236 1237 static void setup_object(struct kmem_cache *s, struct page *page, 1238 void *object) 1239 { 1240 setup_object_debug(s, page, object); 1241 if (unlikely(s->ctor)) 1242 s->ctor(object); 1243 } 1244 1245 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1246 { 1247 struct page *page; 1248 void *start; 1249 void *last; 1250 void *p; 1251 1252 BUG_ON(flags & GFP_SLAB_BUG_MASK); 1253 1254 page = allocate_slab(s, 1255 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1256 if (!page) 1257 goto out; 1258 1259 inc_slabs_node(s, page_to_nid(page), page->objects); 1260 page->slab = s; 1261 page->flags |= 1 << PG_slab; 1262 1263 start = page_address(page); 1264 1265 if (unlikely(s->flags & SLAB_POISON)) 1266 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); 1267 1268 last = start; 1269 for_each_object(p, s, start, page->objects) { 1270 setup_object(s, page, last); 1271 set_freepointer(s, last, p); 1272 last = p; 1273 } 1274 setup_object(s, page, last); 1275 set_freepointer(s, last, NULL); 1276 1277 page->freelist = start; 1278 page->inuse = 0; 1279 out: 1280 return page; 1281 } 1282 1283 static void __free_slab(struct kmem_cache *s, struct page *page) 1284 { 1285 int order = compound_order(page); 1286 int pages = 1 << order; 1287 1288 if (kmem_cache_debug(s)) { 1289 void *p; 1290 1291 slab_pad_check(s, page); 1292 for_each_object(p, s, page_address(page), 1293 page->objects) 1294 check_object(s, page, p, SLUB_RED_INACTIVE); 1295 } 1296 1297 kmemcheck_free_shadow(page, compound_order(page)); 1298 1299 mod_zone_page_state(page_zone(page), 1300 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1301 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1302 -pages); 1303 1304 __ClearPageSlab(page); 1305 reset_page_mapcount(page); 1306 if (current->reclaim_state) 1307 current->reclaim_state->reclaimed_slab += pages; 1308 __free_pages(page, order); 1309 } 1310 1311 #define need_reserve_slab_rcu \ 1312 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) 1313 1314 static void rcu_free_slab(struct rcu_head *h) 1315 { 1316 struct page *page; 1317 1318 if (need_reserve_slab_rcu) 1319 page = virt_to_head_page(h); 1320 else 1321 page = container_of((struct list_head *)h, struct page, lru); 1322 1323 __free_slab(page->slab, page); 1324 } 1325 1326 static void free_slab(struct kmem_cache *s, struct page *page) 1327 { 1328 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1329 struct rcu_head *head; 1330 1331 if (need_reserve_slab_rcu) { 1332 int order = compound_order(page); 1333 int offset = (PAGE_SIZE << order) - s->reserved; 1334 1335 VM_BUG_ON(s->reserved != sizeof(*head)); 1336 head = page_address(page) + offset; 1337 } else { 1338 /* 1339 * RCU free overloads the RCU head over the LRU 1340 */ 1341 head = (void *)&page->lru; 1342 } 1343 1344 call_rcu(head, rcu_free_slab); 1345 } else 1346 __free_slab(s, page); 1347 } 1348 1349 static void discard_slab(struct kmem_cache *s, struct page *page) 1350 { 1351 dec_slabs_node(s, page_to_nid(page), page->objects); 1352 free_slab(s, page); 1353 } 1354 1355 /* 1356 * Per slab locking using the pagelock 1357 */ 1358 static __always_inline void slab_lock(struct page *page) 1359 { 1360 bit_spin_lock(PG_locked, &page->flags); 1361 } 1362 1363 static __always_inline void slab_unlock(struct page *page) 1364 { 1365 __bit_spin_unlock(PG_locked, &page->flags); 1366 } 1367 1368 static __always_inline int slab_trylock(struct page *page) 1369 { 1370 int rc = 1; 1371 1372 rc = bit_spin_trylock(PG_locked, &page->flags); 1373 return rc; 1374 } 1375 1376 /* 1377 * Management of partially allocated slabs 1378 */ 1379 static void add_partial(struct kmem_cache_node *n, 1380 struct page *page, int tail) 1381 { 1382 spin_lock(&n->list_lock); 1383 n->nr_partial++; 1384 if (tail) 1385 list_add_tail(&page->lru, &n->partial); 1386 else 1387 list_add(&page->lru, &n->partial); 1388 spin_unlock(&n->list_lock); 1389 } 1390 1391 static inline void __remove_partial(struct kmem_cache_node *n, 1392 struct page *page) 1393 { 1394 list_del(&page->lru); 1395 n->nr_partial--; 1396 } 1397 1398 static void remove_partial(struct kmem_cache *s, struct page *page) 1399 { 1400 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1401 1402 spin_lock(&n->list_lock); 1403 __remove_partial(n, page); 1404 spin_unlock(&n->list_lock); 1405 } 1406 1407 /* 1408 * Lock slab and remove from the partial list. 1409 * 1410 * Must hold list_lock. 1411 */ 1412 static inline int lock_and_freeze_slab(struct kmem_cache_node *n, 1413 struct page *page) 1414 { 1415 if (slab_trylock(page)) { 1416 __remove_partial(n, page); 1417 __SetPageSlubFrozen(page); 1418 return 1; 1419 } 1420 return 0; 1421 } 1422 1423 /* 1424 * Try to allocate a partial slab from a specific node. 1425 */ 1426 static struct page *get_partial_node(struct kmem_cache_node *n) 1427 { 1428 struct page *page; 1429 1430 /* 1431 * Racy check. If we mistakenly see no partial slabs then we 1432 * just allocate an empty slab. If we mistakenly try to get a 1433 * partial slab and there is none available then get_partials() 1434 * will return NULL. 
1435 */ 1436 if (!n || !n->nr_partial) 1437 return NULL; 1438 1439 spin_lock(&n->list_lock); 1440 list_for_each_entry(page, &n->partial, lru) 1441 if (lock_and_freeze_slab(n, page)) 1442 goto out; 1443 page = NULL; 1444 out: 1445 spin_unlock(&n->list_lock); 1446 return page; 1447 } 1448 1449 /* 1450 * Get a page from somewhere. Search in increasing NUMA distances. 1451 */ 1452 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) 1453 { 1454 #ifdef CONFIG_NUMA 1455 struct zonelist *zonelist; 1456 struct zoneref *z; 1457 struct zone *zone; 1458 enum zone_type high_zoneidx = gfp_zone(flags); 1459 struct page *page; 1460 1461 /* 1462 * The defrag ratio allows a configuration of the tradeoffs between 1463 * inter node defragmentation and node local allocations. A lower 1464 * defrag_ratio increases the tendency to do local allocations 1465 * instead of attempting to obtain partial slabs from other nodes. 1466 * 1467 * If the defrag_ratio is set to 0 then kmalloc() always 1468 * returns node local objects. If the ratio is higher then kmalloc() 1469 * may return off node objects because partial slabs are obtained 1470 * from other nodes and filled up. 1471 * 1472 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes 1473 * defrag_ratio = 1000) then every (well almost) allocation will 1474 * first attempt to defrag slab caches on other nodes. This means 1475 * scanning over all nodes to look for partial slabs which may be 1476 * expensive if we do it every time we are trying to find a slab 1477 * with available objects. 1478 */ 1479 if (!s->remote_node_defrag_ratio || 1480 get_cycles() % 1024 > s->remote_node_defrag_ratio) 1481 return NULL; 1482 1483 get_mems_allowed(); 1484 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 1485 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1486 struct kmem_cache_node *n; 1487 1488 n = get_node(s, zone_to_nid(zone)); 1489 1490 if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1491 n->nr_partial > s->min_partial) { 1492 page = get_partial_node(n); 1493 if (page) { 1494 put_mems_allowed(); 1495 return page; 1496 } 1497 } 1498 } 1499 put_mems_allowed(); 1500 #endif 1501 return NULL; 1502 } 1503 1504 /* 1505 * Get a partial page, lock it and return it. 1506 */ 1507 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) 1508 { 1509 struct page *page; 1510 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node; 1511 1512 page = get_partial_node(get_node(s, searchnode)); 1513 if (page || node != NUMA_NO_NODE) 1514 return page; 1515 1516 return get_any_partial(s, flags); 1517 } 1518 1519 /* 1520 * Move a page back to the lists. 1521 * 1522 * Must be called with the slab lock held. 1523 * 1524 * On exit the slab lock will have been dropped. 1525 */ 1526 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) 1527 __releases(bitlock) 1528 { 1529 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1530 1531 __ClearPageSlubFrozen(page); 1532 if (page->inuse) { 1533 1534 if (page->freelist) { 1535 add_partial(n, page, tail); 1536 stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); 1537 } else { 1538 stat(s, DEACTIVATE_FULL); 1539 if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER)) 1540 add_full(n, page); 1541 } 1542 slab_unlock(page); 1543 } else { 1544 stat(s, DEACTIVATE_EMPTY); 1545 if (n->nr_partial < s->min_partial) { 1546 /* 1547 * Adding an empty slab to the partial slabs in order 1548 * to avoid page allocator overhead. 
This slab needs 1549 * to come after the other slabs with objects in 1550 * so that the others get filled first. That way the 1551 * size of the partial list stays small. 1552 * 1553 * kmem_cache_shrink can reclaim any empty slabs from 1554 * the partial list. 1555 */ 1556 add_partial(n, page, 1); 1557 slab_unlock(page); 1558 } else { 1559 slab_unlock(page); 1560 stat(s, FREE_SLAB); 1561 discard_slab(s, page); 1562 } 1563 } 1564 } 1565 1566 #ifdef CONFIG_PREEMPT 1567 /* 1568 * Calculate the next globally unique transaction for disambiguiation 1569 * during cmpxchg. The transactions start with the cpu number and are then 1570 * incremented by CONFIG_NR_CPUS. 1571 */ 1572 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 1573 #else 1574 /* 1575 * No preemption supported therefore also no need to check for 1576 * different cpus. 1577 */ 1578 #define TID_STEP 1 1579 #endif 1580 1581 static inline unsigned long next_tid(unsigned long tid) 1582 { 1583 return tid + TID_STEP; 1584 } 1585 1586 static inline unsigned int tid_to_cpu(unsigned long tid) 1587 { 1588 return tid % TID_STEP; 1589 } 1590 1591 static inline unsigned long tid_to_event(unsigned long tid) 1592 { 1593 return tid / TID_STEP; 1594 } 1595 1596 static inline unsigned int init_tid(int cpu) 1597 { 1598 return cpu; 1599 } 1600 1601 static inline void note_cmpxchg_failure(const char *n, 1602 const struct kmem_cache *s, unsigned long tid) 1603 { 1604 #ifdef SLUB_DEBUG_CMPXCHG 1605 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 1606 1607 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name); 1608 1609 #ifdef CONFIG_PREEMPT 1610 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 1611 printk("due to cpu change %d -> %d\n", 1612 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 1613 else 1614 #endif 1615 if (tid_to_event(tid) != tid_to_event(actual_tid)) 1616 printk("due to cpu running other code. Event %ld->%ld\n", 1617 tid_to_event(tid), tid_to_event(actual_tid)); 1618 else 1619 printk("for unknown reason: actual=%lx was=%lx target=%lx\n", 1620 actual_tid, tid, next_tid(tid)); 1621 #endif 1622 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 1623 } 1624 1625 void init_kmem_cache_cpus(struct kmem_cache *s) 1626 { 1627 int cpu; 1628 1629 for_each_possible_cpu(cpu) 1630 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); 1631 } 1632 /* 1633 * Remove the cpu slab 1634 */ 1635 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1636 __releases(bitlock) 1637 { 1638 struct page *page = c->page; 1639 int tail = 1; 1640 1641 if (page->freelist) 1642 stat(s, DEACTIVATE_REMOTE_FREES); 1643 /* 1644 * Merge cpu freelist into slab freelist. Typically we get here 1645 * because both freelists are empty. So this is unlikely 1646 * to occur. 1647 */ 1648 while (unlikely(c->freelist)) { 1649 void **object; 1650 1651 tail = 0; /* Hot objects. Put the slab first */ 1652 1653 /* Retrieve object from cpu_freelist */ 1654 object = c->freelist; 1655 c->freelist = get_freepointer(s, c->freelist); 1656 1657 /* And put onto the regular freelist */ 1658 set_freepointer(s, object, page->freelist); 1659 page->freelist = object; 1660 page->inuse--; 1661 } 1662 c->page = NULL; 1663 c->tid = next_tid(c->tid); 1664 unfreeze_slab(s, page, tail); 1665 } 1666 1667 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1668 { 1669 stat(s, CPUSLAB_FLUSH); 1670 slab_lock(c->page); 1671 deactivate_slab(s, c); 1672 } 1673 1674 /* 1675 * Flush cpu slab. 1676 * 1677 * Called from IPI handler with interrupts disabled. 
1678 */ 1679 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 1680 { 1681 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 1682 1683 if (likely(c && c->page)) 1684 flush_slab(s, c); 1685 } 1686 1687 static void flush_cpu_slab(void *d) 1688 { 1689 struct kmem_cache *s = d; 1690 1691 __flush_cpu_slab(s, smp_processor_id()); 1692 } 1693 1694 static void flush_all(struct kmem_cache *s) 1695 { 1696 on_each_cpu(flush_cpu_slab, s, 1); 1697 } 1698 1699 /* 1700 * Check if the objects in a per cpu structure fit numa 1701 * locality expectations. 1702 */ 1703 static inline int node_match(struct kmem_cache_cpu *c, int node) 1704 { 1705 #ifdef CONFIG_NUMA 1706 if (node != NUMA_NO_NODE && c->node != node) 1707 return 0; 1708 #endif 1709 return 1; 1710 } 1711 1712 static int count_free(struct page *page) 1713 { 1714 return page->objects - page->inuse; 1715 } 1716 1717 static unsigned long count_partial(struct kmem_cache_node *n, 1718 int (*get_count)(struct page *)) 1719 { 1720 unsigned long flags; 1721 unsigned long x = 0; 1722 struct page *page; 1723 1724 spin_lock_irqsave(&n->list_lock, flags); 1725 list_for_each_entry(page, &n->partial, lru) 1726 x += get_count(page); 1727 spin_unlock_irqrestore(&n->list_lock, flags); 1728 return x; 1729 } 1730 1731 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 1732 { 1733 #ifdef CONFIG_SLUB_DEBUG 1734 return atomic_long_read(&n->total_objects); 1735 #else 1736 return 0; 1737 #endif 1738 } 1739 1740 static noinline void 1741 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 1742 { 1743 int node; 1744 1745 printk(KERN_WARNING 1746 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n", 1747 nid, gfpflags); 1748 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, " 1749 "default order: %d, min order: %d\n", s->name, s->objsize, 1750 s->size, oo_order(s->oo), oo_order(s->min)); 1751 1752 if (oo_order(s->min) > get_order(s->objsize)) 1753 printk(KERN_WARNING " %s debugging increased min order, use " 1754 "slub_debug=O to disable.\n", s->name); 1755 1756 for_each_online_node(node) { 1757 struct kmem_cache_node *n = get_node(s, node); 1758 unsigned long nr_slabs; 1759 unsigned long nr_objs; 1760 unsigned long nr_free; 1761 1762 if (!n) 1763 continue; 1764 1765 nr_free = count_partial(n, count_free); 1766 nr_slabs = node_nr_slabs(n); 1767 nr_objs = node_nr_objs(n); 1768 1769 printk(KERN_WARNING 1770 " node %d: slabs: %ld, objs: %ld, free: %ld\n", 1771 node, nr_slabs, nr_objs, nr_free); 1772 } 1773 } 1774 1775 /* 1776 * Slow path. The lockless freelist is empty or we need to perform 1777 * debugging duties. 1778 * 1779 * Interrupts are disabled. 1780 * 1781 * Processing is still very fast if new objects have been freed to the 1782 * regular freelist. In that case we simply take over the regular freelist 1783 * as the lockless freelist and zap the regular freelist. 1784 * 1785 * If that is not working then we fall back to the partial lists. We take the 1786 * first element of the freelist as the object to allocate now and move the 1787 * rest of the freelist to the lockless freelist. 1788 * 1789 * And if we were unable to get a new slab from the partial slab lists then 1790 * we need to allocate a new slab. This is the slowest path since it involves 1791 * a call to the page allocator and the setup of a new slab. 
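 *
 * As an illustration (object names are hypothetical): if the slab's regular
 * freelist currently chains A -> B -> C, the refill path hands A back to the
 * caller, installs B -> C as the new lockless per cpu freelist, marks all
 * objects in the page as in use (page->inuse = page->objects) and clears
 * page->freelist. Subsequent fastpath allocations then walk B and C without
 * taking the slab lock.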
1792 */ 1793 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 1794 unsigned long addr, struct kmem_cache_cpu *c) 1795 { 1796 void **object; 1797 struct page *page; 1798 unsigned long flags; 1799 1800 local_irq_save(flags); 1801 #ifdef CONFIG_PREEMPT 1802 /* 1803 * We may have been preempted and rescheduled on a different 1804 * cpu before disabling interrupts. Need to reload cpu area 1805 * pointer. 1806 */ 1807 c = this_cpu_ptr(s->cpu_slab); 1808 #endif 1809 1810 /* We handle __GFP_ZERO in the caller */ 1811 gfpflags &= ~__GFP_ZERO; 1812 1813 page = c->page; 1814 if (!page) 1815 goto new_slab; 1816 1817 slab_lock(page); 1818 if (unlikely(!node_match(c, node))) 1819 goto another_slab; 1820 1821 stat(s, ALLOC_REFILL); 1822 1823 load_freelist: 1824 object = page->freelist; 1825 if (unlikely(!object)) 1826 goto another_slab; 1827 if (kmem_cache_debug(s)) 1828 goto debug; 1829 1830 c->freelist = get_freepointer(s, object); 1831 page->inuse = page->objects; 1832 page->freelist = NULL; 1833 1834 slab_unlock(page); 1835 c->tid = next_tid(c->tid); 1836 local_irq_restore(flags); 1837 stat(s, ALLOC_SLOWPATH); 1838 return object; 1839 1840 another_slab: 1841 deactivate_slab(s, c); 1842 1843 new_slab: 1844 page = get_partial(s, gfpflags, node); 1845 if (page) { 1846 stat(s, ALLOC_FROM_PARTIAL); 1847 c->node = page_to_nid(page); 1848 c->page = page; 1849 goto load_freelist; 1850 } 1851 1852 gfpflags &= gfp_allowed_mask; 1853 if (gfpflags & __GFP_WAIT) 1854 local_irq_enable(); 1855 1856 page = new_slab(s, gfpflags, node); 1857 1858 if (gfpflags & __GFP_WAIT) 1859 local_irq_disable(); 1860 1861 if (page) { 1862 c = __this_cpu_ptr(s->cpu_slab); 1863 stat(s, ALLOC_SLAB); 1864 if (c->page) 1865 flush_slab(s, c); 1866 1867 slab_lock(page); 1868 __SetPageSlubFrozen(page); 1869 c->node = page_to_nid(page); 1870 c->page = page; 1871 goto load_freelist; 1872 } 1873 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) 1874 slab_out_of_memory(s, gfpflags, node); 1875 local_irq_restore(flags); 1876 return NULL; 1877 debug: 1878 if (!alloc_debug_processing(s, page, object, addr)) 1879 goto another_slab; 1880 1881 page->inuse++; 1882 page->freelist = get_freepointer(s, object); 1883 deactivate_slab(s, c); 1884 c->page = NULL; 1885 c->node = NUMA_NO_NODE; 1886 local_irq_restore(flags); 1887 return object; 1888 } 1889 1890 /* 1891 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 1892 * have the fastpath folded into their functions. So no function call 1893 * overhead for requests that can be satisfied on the fastpath. 1894 * 1895 * The fastpath works by first checking if the lockless freelist can be used. 1896 * If not then __slab_alloc is called for slow processing. 1897 * 1898 * Otherwise we can simply pick the next object from the lockless free list. 1899 */ 1900 static __always_inline void *slab_alloc(struct kmem_cache *s, 1901 gfp_t gfpflags, int node, unsigned long addr) 1902 { 1903 void **object; 1904 struct kmem_cache_cpu *c; 1905 unsigned long tid; 1906 1907 if (slab_pre_alloc_hook(s, gfpflags)) 1908 return NULL; 1909 1910 redo: 1911 1912 /* 1913 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 1914 * enabled. We may switch back and forth between cpus while 1915 * reading from one cpu area. That does not matter as long 1916 * as we end up on the original cpu again when doing the cmpxchg. 1917 */ 1918 c = __this_cpu_ptr(s->cpu_slab); 1919 1920 /* 1921 * The transaction ids are globally unique per cpu and per operation on 1922 * a per cpu queue. 
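	 *
	 * A worked example (assuming CONFIG_PREEMPT and CONFIG_NR_CPUS = 4,
	 * so TID_STEP = 4): cpu 2 starts with tid 2 and advances through
	 * 6, 10, 14, ... on every operation. From tid 10, tid_to_cpu(10) =
	 * 10 % 4 = 2 recovers the cpu and tid_to_event(10) = 10 / 4 = 2
	 * recovers the per cpu event count. A tid belonging to another cpu
	 * or to an older event can therefore never compare equal below.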
	 * Thus they can guarantee that the cmpxchg_double
	 * occurs on the right processor and that there was no operation on the
	 * linked list in between.
	 */
	tid = c->tid;
	barrier();

	object = c->freelist;
	if (unlikely(!object || !node_match(c, node)))

		object = __slab_alloc(s, gfpflags, node, addr, c);

	else {
		/*
		 * The cmpxchg will only match if there was no additional
		 * operation and if we are on the right processor.
		 *
		 * The cmpxchg does the following atomically (without lock semantics!)
		 * 1. Relocate first pointer to the current per cpu area.
		 * 2. Verify that tid and freelist have not been changed
		 * 3. If they were not changed replace tid and freelist
		 *
		 * Since this is without lock semantics the protection is only against
		 * code executing on this cpu *not* from access by other cpus.
		 */
		if (unlikely(!irqsafe_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				object, tid,
				get_freepointer_safe(s, object), next_tid(tid)))) {

			note_cmpxchg_failure("slab_alloc", s, tid);
			goto redo;
		}
		stat(s, ALLOC_FASTPATH);
	}

	if (unlikely(gfpflags & __GFP_ZERO) && object)
		memset(object, 0, s->objsize);

	slab_post_alloc_hook(s, gfpflags, object);

	return object;
}

void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);

void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    s->objsize, s->size, gfpflags, node);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
				    gfp_t gfpflags,
				    int node, size_t size)
{
	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret,
			   size, s->size, gfpflags, node);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif
#endif

/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
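 *
 * Concretely (an illustration of the cases handled below): freeing into a
 * slab that already had free objects and does not become empty only links
 * the object into page->freelist and returns. Only the transitions matter:
 * a full slab gaining its first free object is added to the partial list,
 * and a slab whose last object is freed is removed and discarded. Frozen
 * cpu slabs skip the list handling entirely.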
2028 */ 2029 static void __slab_free(struct kmem_cache *s, struct page *page, 2030 void *x, unsigned long addr) 2031 { 2032 void *prior; 2033 void **object = (void *)x; 2034 unsigned long flags; 2035 2036 local_irq_save(flags); 2037 slab_lock(page); 2038 stat(s, FREE_SLOWPATH); 2039 2040 if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) 2041 goto out_unlock; 2042 2043 prior = page->freelist; 2044 set_freepointer(s, object, prior); 2045 page->freelist = object; 2046 page->inuse--; 2047 2048 if (unlikely(PageSlubFrozen(page))) { 2049 stat(s, FREE_FROZEN); 2050 goto out_unlock; 2051 } 2052 2053 if (unlikely(!page->inuse)) 2054 goto slab_empty; 2055 2056 /* 2057 * Objects left in the slab. If it was not on the partial list before 2058 * then add it. 2059 */ 2060 if (unlikely(!prior)) { 2061 add_partial(get_node(s, page_to_nid(page)), page, 1); 2062 stat(s, FREE_ADD_PARTIAL); 2063 } 2064 2065 out_unlock: 2066 slab_unlock(page); 2067 local_irq_restore(flags); 2068 return; 2069 2070 slab_empty: 2071 if (prior) { 2072 /* 2073 * Slab still on the partial list. 2074 */ 2075 remove_partial(s, page); 2076 stat(s, FREE_REMOVE_PARTIAL); 2077 } 2078 slab_unlock(page); 2079 local_irq_restore(flags); 2080 stat(s, FREE_SLAB); 2081 discard_slab(s, page); 2082 } 2083 2084 /* 2085 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 2086 * can perform fastpath freeing without additional function calls. 2087 * 2088 * The fastpath is only possible if we are freeing to the current cpu slab 2089 * of this processor. This is typically the case if we have just allocated 2090 * the item before. 2091 * 2092 * If fastpath is not possible then fall back to __slab_free where we deal 2093 * with all sorts of special processing. 2094 */ 2095 static __always_inline void slab_free(struct kmem_cache *s, 2096 struct page *page, void *x, unsigned long addr) 2097 { 2098 void **object = (void *)x; 2099 struct kmem_cache_cpu *c; 2100 unsigned long tid; 2101 2102 slab_free_hook(s, x); 2103 2104 redo: 2105 2106 /* 2107 * Determine the per cpu slab of the current cpu. 2108 * The cpu may change afterward. However that does not matter since 2109 * data is retrieved via this pointer. If we are on the same cpu 2110 * during the cmpxchg then the free will succeed. 2111 */ 2112 c = __this_cpu_ptr(s->cpu_slab); 2113 2114 tid = c->tid; 2115 barrier(); 2116 2117 if (likely(page == c->page)) { 2118 set_freepointer(s, object, c->freelist); 2119 2120 if (unlikely(!irqsafe_cpu_cmpxchg_double( 2121 s->cpu_slab->freelist, s->cpu_slab->tid, 2122 c->freelist, tid, 2123 object, next_tid(tid)))) { 2124 2125 note_cmpxchg_failure("slab_free", s, tid); 2126 goto redo; 2127 } 2128 stat(s, FREE_FASTPATH); 2129 } else 2130 __slab_free(s, page, x, addr); 2131 2132 } 2133 2134 void kmem_cache_free(struct kmem_cache *s, void *x) 2135 { 2136 struct page *page; 2137 2138 page = virt_to_head_page(x); 2139 2140 slab_free(s, page, x, _RET_IP_); 2141 2142 trace_kmem_cache_free(_RET_IP_, x); 2143 } 2144 EXPORT_SYMBOL(kmem_cache_free); 2145 2146 /* 2147 * Object placement in a slab is made very easy because we always start at 2148 * offset 0. If we tune the size of the object to the alignment then we can 2149 * get the required alignment by putting one properly sized object after 2150 * another. 2151 * 2152 * Notice that the allocation order determines the sizes of the per cpu 2153 * caches. Each processor always has one slab available for allocations.
2154 * Increasing the allocation order reduces the number of times that slabs 2155 * must be moved on and off the partial lists and is therefore a factor in 2156 * locking overhead. 2157 */ 2158 2159 /* 2160 * Minimum / Maximum order of slab pages. This influences locking overhead 2161 * and slab fragmentation. A higher order reduces the number of partial slabs 2162 * and increases the number of allocations possible without having to 2163 * take the list_lock. 2164 */ 2165 static int slub_min_order; 2166 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER; 2167 static int slub_min_objects; 2168 2169 /* 2170 * Merge control. If this is set then no merging of slab caches will occur. 2171 * (Could be removed. This was introduced to pacify the merge skeptics.) 2172 */ 2173 static int slub_nomerge; 2174 2175 /* 2176 * Calculate the order of allocation given a slab object size. 2177 * 2178 * The order of allocation has significant impact on performance and other 2179 * system components. Generally order 0 allocations should be preferred since 2180 * order 0 does not cause fragmentation in the page allocator. Larger objects 2181 * can be problematic to put into order 0 slabs because there may be too much 2182 * unused space left. We go to a higher order if more than 1/16th of the slab 2183 * would be wasted. 2184 * 2185 * In order to reach satisfactory performance we must ensure that a minimum 2186 * number of objects is in one slab. Otherwise we may generate too much 2187 * activity on the partial lists which requires taking the list_lock. This is 2188 * less a concern for large slabs though which are rarely used. 2189 * 2190 * slub_max_order specifies the order where we begin to stop considering the 2191 * number of objects in a slab as critical. If we reach slub_max_order then 2192 * we try to keep the page order as low as possible. So we accept more waste 2193 * of space in favor of a small page order. 2194 * 2195 * Higher order allocations also allow the placement of more objects in a 2196 * slab and thereby reduce object handling overhead. If the user has 2197 * requested a higher minimum order then we start with that one instead of 2198 * the smallest order which will fit the object. 2199 */ 2200 static inline int slab_order(int size, int min_objects, 2201 int max_order, int fract_leftover, int reserved) 2202 { 2203 int order; 2204 int rem; 2205 int min_order = slub_min_order; 2206 2207 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE) 2208 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 2209 2210 for (order = max(min_order, 2211 fls(min_objects * size - 1) - PAGE_SHIFT); 2212 order <= max_order; order++) { 2213 2214 unsigned long slab_size = PAGE_SIZE << order; 2215 2216 if (slab_size < min_objects * size + reserved) 2217 continue; 2218 2219 rem = (slab_size - reserved) % size; 2220 2221 if (rem <= slab_size / fract_leftover) 2222 break; 2223 2224 } 2225 2226 return order; 2227 } 2228 2229 static inline int calculate_order(int size, int reserved) 2230 { 2231 int order; 2232 int min_objects; 2233 int fraction; 2234 int max_objects; 2235 2236 /* 2237 * Attempt to find best configuration for a slab. This 2238 * works by first attempting to generate a layout with 2239 * the best configuration and backing off gradually. 2240 * 2241 * First we reduce the acceptable waste in a slab. Then 2242 * we reduce the minimum objects required in a slab.
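 *
 * Rough worked example (a sketch, assuming 4KiB pages and reserved == 0):
 * on a 4-cpu machine the default min_objects is 4 * (fls(4) + 1) == 16.
 * For 700-byte objects slab_order() then starts at order 2, since
 * fls(16 * 700 - 1) - PAGE_SHIFT == 14 - 12. A 16KiB slab holds 23 such
 * objects with 284 bytes left over, and 284 <= 16384 / 16, so order 2 is
 * accepted on the first pass with fraction == 16.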
2243 */ 2244 min_objects = slub_min_objects; 2245 if (!min_objects) 2246 min_objects = 4 * (fls(nr_cpu_ids) + 1); 2247 max_objects = order_objects(slub_max_order, size, reserved); 2248 min_objects = min(min_objects, max_objects); 2249 2250 while (min_objects > 1) { 2251 fraction = 16; 2252 while (fraction >= 4) { 2253 order = slab_order(size, min_objects, 2254 slub_max_order, fraction, reserved); 2255 if (order <= slub_max_order) 2256 return order; 2257 fraction /= 2; 2258 } 2259 min_objects--; 2260 } 2261 2262 /* 2263 * We were unable to place multiple objects in a slab. Now 2264 * lets see if we can place a single object there. 2265 */ 2266 order = slab_order(size, 1, slub_max_order, 1, reserved); 2267 if (order <= slub_max_order) 2268 return order; 2269 2270 /* 2271 * Doh this slab cannot be placed using slub_max_order. 2272 */ 2273 order = slab_order(size, 1, MAX_ORDER, 1, reserved); 2274 if (order < MAX_ORDER) 2275 return order; 2276 return -ENOSYS; 2277 } 2278 2279 /* 2280 * Figure out what the alignment of the objects will be. 2281 */ 2282 static unsigned long calculate_alignment(unsigned long flags, 2283 unsigned long align, unsigned long size) 2284 { 2285 /* 2286 * If the user wants hardware cache aligned objects then follow that 2287 * suggestion if the object is sufficiently large. 2288 * 2289 * The hardware cache alignment cannot override the specified 2290 * alignment though. If that is greater then use it. 2291 */ 2292 if (flags & SLAB_HWCACHE_ALIGN) { 2293 unsigned long ralign = cache_line_size(); 2294 while (size <= ralign / 2) 2295 ralign /= 2; 2296 align = max(align, ralign); 2297 } 2298 2299 if (align < ARCH_SLAB_MINALIGN) 2300 align = ARCH_SLAB_MINALIGN; 2301 2302 return ALIGN(align, sizeof(void *)); 2303 } 2304 2305 static void 2306 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) 2307 { 2308 n->nr_partial = 0; 2309 spin_lock_init(&n->list_lock); 2310 INIT_LIST_HEAD(&n->partial); 2311 #ifdef CONFIG_SLUB_DEBUG 2312 atomic_long_set(&n->nr_slabs, 0); 2313 atomic_long_set(&n->total_objects, 0); 2314 INIT_LIST_HEAD(&n->full); 2315 #endif 2316 } 2317 2318 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 2319 { 2320 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 2321 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); 2322 2323 #ifdef CONFIG_CMPXCHG_LOCAL 2324 /* 2325 * Must align to double word boundary for the double cmpxchg instructions 2326 * to work. 2327 */ 2328 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *)); 2329 #else 2330 /* Regular alignment is sufficient */ 2331 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu); 2332 #endif 2333 2334 if (!s->cpu_slab) 2335 return 0; 2336 2337 init_kmem_cache_cpus(s); 2338 2339 return 1; 2340 } 2341 2342 static struct kmem_cache *kmem_cache_node; 2343 2344 /* 2345 * No kmalloc_node yet so do it by hand. We know that this is the first 2346 * slab on the node for this slabcache. There are no concurrent accesses 2347 * possible. 2348 * 2349 * Note that this function only works on the kmalloc_node_cache 2350 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2351 * memory on a fresh node that has no slab structures yet. 
2352 */ 2353 static void early_kmem_cache_node_alloc(int node) 2354 { 2355 struct page *page; 2356 struct kmem_cache_node *n; 2357 unsigned long flags; 2358 2359 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 2360 2361 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 2362 2363 BUG_ON(!page); 2364 if (page_to_nid(page) != node) { 2365 printk(KERN_ERR "SLUB: Unable to allocate memory from " 2366 "node %d\n", node); 2367 printk(KERN_ERR "SLUB: Allocating a useless per node structure " 2368 "in order to be able to continue\n"); 2369 } 2370 2371 n = page->freelist; 2372 BUG_ON(!n); 2373 page->freelist = get_freepointer(kmem_cache_node, n); 2374 page->inuse++; 2375 kmem_cache_node->node[node] = n; 2376 #ifdef CONFIG_SLUB_DEBUG 2377 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 2378 init_tracking(kmem_cache_node, n); 2379 #endif 2380 init_kmem_cache_node(n, kmem_cache_node); 2381 inc_slabs_node(kmem_cache_node, node, page->objects); 2382 2383 /* 2384 * lockdep requires consistent irq usage for each lock 2385 * so even though there cannot be a race this early in 2386 * the boot sequence, we still disable irqs. 2387 */ 2388 local_irq_save(flags); 2389 add_partial(n, page, 0); 2390 local_irq_restore(flags); 2391 } 2392 2393 static void free_kmem_cache_nodes(struct kmem_cache *s) 2394 { 2395 int node; 2396 2397 for_each_node_state(node, N_NORMAL_MEMORY) { 2398 struct kmem_cache_node *n = s->node[node]; 2399 2400 if (n) 2401 kmem_cache_free(kmem_cache_node, n); 2402 2403 s->node[node] = NULL; 2404 } 2405 } 2406 2407 static int init_kmem_cache_nodes(struct kmem_cache *s) 2408 { 2409 int node; 2410 2411 for_each_node_state(node, N_NORMAL_MEMORY) { 2412 struct kmem_cache_node *n; 2413 2414 if (slab_state == DOWN) { 2415 early_kmem_cache_node_alloc(node); 2416 continue; 2417 } 2418 n = kmem_cache_alloc_node(kmem_cache_node, 2419 GFP_KERNEL, node); 2420 2421 if (!n) { 2422 free_kmem_cache_nodes(s); 2423 return 0; 2424 } 2425 2426 s->node[node] = n; 2427 init_kmem_cache_node(n, s); 2428 } 2429 return 1; 2430 } 2431 2432 static void set_min_partial(struct kmem_cache *s, unsigned long min) 2433 { 2434 if (min < MIN_PARTIAL) 2435 min = MIN_PARTIAL; 2436 else if (min > MAX_PARTIAL) 2437 min = MAX_PARTIAL; 2438 s->min_partial = min; 2439 } 2440 2441 /* 2442 * calculate_sizes() determines the order and the distribution of data within 2443 * a slab object. 2444 */ 2445 static int calculate_sizes(struct kmem_cache *s, int forced_order) 2446 { 2447 unsigned long flags = s->flags; 2448 unsigned long size = s->objsize; 2449 unsigned long align = s->align; 2450 int order; 2451 2452 /* 2453 * Round up object size to the next word boundary. We can only 2454 * place the free pointer at word boundaries and this determines 2455 * the possible location of the free pointer. 2456 */ 2457 size = ALIGN(size, sizeof(void *)); 2458 2459 #ifdef CONFIG_SLUB_DEBUG 2460 /* 2461 * Determine if we can poison the object itself. If the user of 2462 * the slab may touch the object after free or before allocation 2463 * then we should never poison the object itself. 2464 */ 2465 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 2466 !s->ctor) 2467 s->flags |= __OBJECT_POISON; 2468 else 2469 s->flags &= ~__OBJECT_POISON; 2470 2471 2472 /* 2473 * If we are Redzoning then check if there is some space between the 2474 * end of the object and the free pointer. If not then add an 2475 * additional word to have some bytes to store Redzone information. 
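 *
 * (Sketch of the resulting per-object area when all of these debug options
 * are enabled: object | red zone word | free pointer | alloc/free track
 * records | one more padding word, finally rounded up to the cache
 * alignment. Which pieces are actually present depends on the flag tests
 * below.)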
2476 */ 2477 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 2478 size += sizeof(void *); 2479 #endif 2480 2481 /* 2482 * With that we have determined the number of bytes in actual use 2483 * by the object. This is the potential offset to the free pointer. 2484 */ 2485 s->inuse = size; 2486 2487 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 2488 s->ctor)) { 2489 /* 2490 * Relocate free pointer after the object if it is not 2491 * permitted to overwrite the first word of the object on 2492 * kmem_cache_free. 2493 * 2494 * This is the case if we do RCU, have a constructor or 2495 * destructor or are poisoning the objects. 2496 */ 2497 s->offset = size; 2498 size += sizeof(void *); 2499 } 2500 2501 #ifdef CONFIG_SLUB_DEBUG 2502 if (flags & SLAB_STORE_USER) 2503 /* 2504 * Need to store information about allocs and frees after 2505 * the object. 2506 */ 2507 size += 2 * sizeof(struct track); 2508 2509 if (flags & SLAB_RED_ZONE) 2510 /* 2511 * Add some empty padding so that we can catch 2512 * overwrites from earlier objects rather than let 2513 * tracking information or the free pointer be 2514 * corrupted if a user writes before the start 2515 * of the object. 2516 */ 2517 size += sizeof(void *); 2518 #endif 2519 2520 /* 2521 * Determine the alignment based on various parameters that the 2522 * user specified and the dynamic determination of cache line size 2523 * on bootup. 2524 */ 2525 align = calculate_alignment(flags, align, s->objsize); 2526 s->align = align; 2527 2528 /* 2529 * SLUB stores one object immediately after another beginning from 2530 * offset 0. In order to align the objects we have to simply size 2531 * each object to conform to the alignment. 2532 */ 2533 size = ALIGN(size, align); 2534 s->size = size; 2535 if (forced_order >= 0) 2536 order = forced_order; 2537 else 2538 order = calculate_order(size, s->reserved); 2539 2540 if (order < 0) 2541 return 0; 2542 2543 s->allocflags = 0; 2544 if (order) 2545 s->allocflags |= __GFP_COMP; 2546 2547 if (s->flags & SLAB_CACHE_DMA) 2548 s->allocflags |= SLUB_DMA; 2549 2550 if (s->flags & SLAB_RECLAIM_ACCOUNT) 2551 s->allocflags |= __GFP_RECLAIMABLE; 2552 2553 /* 2554 * Determine the number of objects per slab 2555 */ 2556 s->oo = oo_make(order, size, s->reserved); 2557 s->min = oo_make(get_order(size), size, s->reserved); 2558 if (oo_objects(s->oo) > oo_objects(s->max)) 2559 s->max = s->oo; 2560 2561 return !!oo_objects(s->oo); 2562 2563 } 2564 2565 static int kmem_cache_open(struct kmem_cache *s, 2566 const char *name, size_t size, 2567 size_t align, unsigned long flags, 2568 void (*ctor)(void *)) 2569 { 2570 memset(s, 0, kmem_size); 2571 s->name = name; 2572 s->ctor = ctor; 2573 s->objsize = size; 2574 s->align = align; 2575 s->flags = kmem_cache_flags(size, flags, name, ctor); 2576 s->reserved = 0; 2577 2578 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) 2579 s->reserved = sizeof(struct rcu_head); 2580 2581 if (!calculate_sizes(s, -1)) 2582 goto error; 2583 if (disable_higher_order_debug) { 2584 /* 2585 * Disable debugging flags that store metadata if the min slab 2586 * order increased. 2587 */ 2588 if (get_order(s->size) > get_order(s->objsize)) { 2589 s->flags &= ~DEBUG_METADATA_FLAGS; 2590 s->offset = 0; 2591 if (!calculate_sizes(s, -1)) 2592 goto error; 2593 } 2594 } 2595 2596 /* 2597 * The larger the object size is, the more pages we want on the partial 2598 * list to avoid pounding the page allocator excessively. 
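 *
 * (For example, ilog2() of a 192-byte object size yields 7 partial slabs to
 * keep around, while very large object sizes produce values that
 * set_min_partial() clamps to MAX_PARTIAL; illustrative numbers only.)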
2599 */ 2600 set_min_partial(s, ilog2(s->size)); 2601 s->refcount = 1; 2602 #ifdef CONFIG_NUMA 2603 s->remote_node_defrag_ratio = 1000; 2604 #endif 2605 if (!init_kmem_cache_nodes(s)) 2606 goto error; 2607 2608 if (alloc_kmem_cache_cpus(s)) 2609 return 1; 2610 2611 free_kmem_cache_nodes(s); 2612 error: 2613 if (flags & SLAB_PANIC) 2614 panic("Cannot create slab %s size=%lu realsize=%u " 2615 "order=%u offset=%u flags=%lx\n", 2616 s->name, (unsigned long)size, s->size, oo_order(s->oo), 2617 s->offset, flags); 2618 return 0; 2619 } 2620 2621 /* 2622 * Determine the size of a slab object 2623 */ 2624 unsigned int kmem_cache_size(struct kmem_cache *s) 2625 { 2626 return s->objsize; 2627 } 2628 EXPORT_SYMBOL(kmem_cache_size); 2629 2630 static void list_slab_objects(struct kmem_cache *s, struct page *page, 2631 const char *text) 2632 { 2633 #ifdef CONFIG_SLUB_DEBUG 2634 void *addr = page_address(page); 2635 void *p; 2636 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * 2637 sizeof(long), GFP_ATOMIC); 2638 if (!map) 2639 return; 2640 slab_err(s, page, "%s", text); 2641 slab_lock(page); 2642 2643 get_map(s, page, map); 2644 for_each_object(p, s, addr, page->objects) { 2645 2646 if (!test_bit(slab_index(p, s, addr), map)) { 2647 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", 2648 p, p - addr); 2649 print_tracking(s, p); 2650 } 2651 } 2652 slab_unlock(page); 2653 kfree(map); 2654 #endif 2655 } 2656 2657 /* 2658 * Attempt to free all partial slabs on a node. 2659 */ 2660 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 2661 { 2662 unsigned long flags; 2663 struct page *page, *h; 2664 2665 spin_lock_irqsave(&n->list_lock, flags); 2666 list_for_each_entry_safe(page, h, &n->partial, lru) { 2667 if (!page->inuse) { 2668 __remove_partial(n, page); 2669 discard_slab(s, page); 2670 } else { 2671 list_slab_objects(s, page, 2672 "Objects remaining on kmem_cache_close()"); 2673 } 2674 } 2675 spin_unlock_irqrestore(&n->list_lock, flags); 2676 } 2677 2678 /* 2679 * Release all resources used by a slab cache. 
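 *
 * Typical lifetime from a cache user's point of view (an illustrative
 * sketch only; struct foo is a made-up example):
 *
 *	s = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
 *	p = kmem_cache_alloc(s, GFP_KERNEL);
 *	kmem_cache_free(s, p);
 *	kmem_cache_destroy(s);		(eventually reaches kmem_cache_close() below)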
2680 */ 2681 static inline int kmem_cache_close(struct kmem_cache *s) 2682 { 2683 int node; 2684 2685 flush_all(s); 2686 free_percpu(s->cpu_slab); 2687 /* Attempt to free all objects */ 2688 for_each_node_state(node, N_NORMAL_MEMORY) { 2689 struct kmem_cache_node *n = get_node(s, node); 2690 2691 free_partial(s, n); 2692 if (n->nr_partial || slabs_node(s, node)) 2693 return 1; 2694 } 2695 free_kmem_cache_nodes(s); 2696 return 0; 2697 } 2698 2699 /* 2700 * Close a cache and release the kmem_cache structure 2701 * (must be used for caches created using kmem_cache_create) 2702 */ 2703 void kmem_cache_destroy(struct kmem_cache *s) 2704 { 2705 down_write(&slub_lock); 2706 s->refcount--; 2707 if (!s->refcount) { 2708 list_del(&s->list); 2709 if (kmem_cache_close(s)) { 2710 printk(KERN_ERR "SLUB %s: %s called for cache that " 2711 "still has objects.\n", s->name, __func__); 2712 dump_stack(); 2713 } 2714 if (s->flags & SLAB_DESTROY_BY_RCU) 2715 rcu_barrier(); 2716 sysfs_slab_remove(s); 2717 } 2718 up_write(&slub_lock); 2719 } 2720 EXPORT_SYMBOL(kmem_cache_destroy); 2721 2722 /******************************************************************** 2723 * Kmalloc subsystem 2724 *******************************************************************/ 2725 2726 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; 2727 EXPORT_SYMBOL(kmalloc_caches); 2728 2729 static struct kmem_cache *kmem_cache; 2730 2731 #ifdef CONFIG_ZONE_DMA 2732 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; 2733 #endif 2734 2735 static int __init setup_slub_min_order(char *str) 2736 { 2737 get_option(&str, &slub_min_order); 2738 2739 return 1; 2740 } 2741 2742 __setup("slub_min_order=", setup_slub_min_order); 2743 2744 static int __init setup_slub_max_order(char *str) 2745 { 2746 get_option(&str, &slub_max_order); 2747 slub_max_order = min(slub_max_order, MAX_ORDER - 1); 2748 2749 return 1; 2750 } 2751 2752 __setup("slub_max_order=", setup_slub_max_order); 2753 2754 static int __init setup_slub_min_objects(char *str) 2755 { 2756 get_option(&str, &slub_min_objects); 2757 2758 return 1; 2759 } 2760 2761 __setup("slub_min_objects=", setup_slub_min_objects); 2762 2763 static int __init setup_slub_nomerge(char *str) 2764 { 2765 slub_nomerge = 1; 2766 return 1; 2767 } 2768 2769 __setup("slub_nomerge", setup_slub_nomerge); 2770 2771 static struct kmem_cache *__init create_kmalloc_cache(const char *name, 2772 int size, unsigned int flags) 2773 { 2774 struct kmem_cache *s; 2775 2776 s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 2777 2778 /* 2779 * This function is called with IRQs disabled during early-boot on 2780 * single CPU so there's no need to take slub_lock here. 2781 */ 2782 if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, 2783 flags, NULL)) 2784 goto panic; 2785 2786 list_add(&s->list, &slab_caches); 2787 return s; 2788 2789 panic: 2790 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2791 return NULL; 2792 } 2793 2794 /* 2795 * Conversion table for small slabs sizes / 8 to the index in the 2796 * kmalloc array. This is necessary for slabs < 192 since we have non power 2797 * of two cache sizes there. The size of larger slabs can be determined using 2798 * fls. 
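 *
 * Example lookups (illustrative): a 100-byte request maps to
 * size_index[(100 - 1) / 8] == size_index[12] == 7, i.e. the kmalloc-128
 * cache, while a 300-byte request is resolved via fls(300 - 1) == 9,
 * i.e. kmalloc-512.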
2799 */ 2800 static s8 size_index[24] = { 2801 3, /* 8 */ 2802 4, /* 16 */ 2803 5, /* 24 */ 2804 5, /* 32 */ 2805 6, /* 40 */ 2806 6, /* 48 */ 2807 6, /* 56 */ 2808 6, /* 64 */ 2809 1, /* 72 */ 2810 1, /* 80 */ 2811 1, /* 88 */ 2812 1, /* 96 */ 2813 7, /* 104 */ 2814 7, /* 112 */ 2815 7, /* 120 */ 2816 7, /* 128 */ 2817 2, /* 136 */ 2818 2, /* 144 */ 2819 2, /* 152 */ 2820 2, /* 160 */ 2821 2, /* 168 */ 2822 2, /* 176 */ 2823 2, /* 184 */ 2824 2 /* 192 */ 2825 }; 2826 2827 static inline int size_index_elem(size_t bytes) 2828 { 2829 return (bytes - 1) / 8; 2830 } 2831 2832 static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2833 { 2834 int index; 2835 2836 if (size <= 192) { 2837 if (!size) 2838 return ZERO_SIZE_PTR; 2839 2840 index = size_index[size_index_elem(size)]; 2841 } else 2842 index = fls(size - 1); 2843 2844 #ifdef CONFIG_ZONE_DMA 2845 if (unlikely((flags & SLUB_DMA))) 2846 return kmalloc_dma_caches[index]; 2847 2848 #endif 2849 return kmalloc_caches[index]; 2850 } 2851 2852 void *__kmalloc(size_t size, gfp_t flags) 2853 { 2854 struct kmem_cache *s; 2855 void *ret; 2856 2857 if (unlikely(size > SLUB_MAX_SIZE)) 2858 return kmalloc_large(size, flags); 2859 2860 s = get_slab(size, flags); 2861 2862 if (unlikely(ZERO_OR_NULL_PTR(s))) 2863 return s; 2864 2865 ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); 2866 2867 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 2868 2869 return ret; 2870 } 2871 EXPORT_SYMBOL(__kmalloc); 2872 2873 #ifdef CONFIG_NUMA 2874 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2875 { 2876 struct page *page; 2877 void *ptr = NULL; 2878 2879 flags |= __GFP_COMP | __GFP_NOTRACK; 2880 page = alloc_pages_node(node, flags, get_order(size)); 2881 if (page) 2882 ptr = page_address(page); 2883 2884 kmemleak_alloc(ptr, size, 1, flags); 2885 return ptr; 2886 } 2887 2888 void *__kmalloc_node(size_t size, gfp_t flags, int node) 2889 { 2890 struct kmem_cache *s; 2891 void *ret; 2892 2893 if (unlikely(size > SLUB_MAX_SIZE)) { 2894 ret = kmalloc_large_node(size, flags, node); 2895 2896 trace_kmalloc_node(_RET_IP_, ret, 2897 size, PAGE_SIZE << get_order(size), 2898 flags, node); 2899 2900 return ret; 2901 } 2902 2903 s = get_slab(size, flags); 2904 2905 if (unlikely(ZERO_OR_NULL_PTR(s))) 2906 return s; 2907 2908 ret = slab_alloc(s, flags, node, _RET_IP_); 2909 2910 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); 2911 2912 return ret; 2913 } 2914 EXPORT_SYMBOL(__kmalloc_node); 2915 #endif 2916 2917 size_t ksize(const void *object) 2918 { 2919 struct page *page; 2920 2921 if (unlikely(object == ZERO_SIZE_PTR)) 2922 return 0; 2923 2924 page = virt_to_head_page(object); 2925 2926 if (unlikely(!PageSlab(page))) { 2927 WARN_ON(!PageCompound(page)); 2928 return PAGE_SIZE << compound_order(page); 2929 } 2930 2931 return slab_ksize(page->slab); 2932 } 2933 EXPORT_SYMBOL(ksize); 2934 2935 void kfree(const void *x) 2936 { 2937 struct page *page; 2938 void *object = (void *)x; 2939 2940 trace_kfree(_RET_IP_, x); 2941 2942 if (unlikely(ZERO_OR_NULL_PTR(x))) 2943 return; 2944 2945 page = virt_to_head_page(x); 2946 if (unlikely(!PageSlab(page))) { 2947 BUG_ON(!PageCompound(page)); 2948 kmemleak_free(x); 2949 put_page(page); 2950 return; 2951 } 2952 slab_free(page->slab, page, object, _RET_IP_); 2953 } 2954 EXPORT_SYMBOL(kfree); 2955 2956 /* 2957 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2958 * the remaining slabs by the number of items in use. The slabs with the 2959 * most items in use come first. 
New allocations will then fill those up 2960 * and thus they can be removed from the partial lists. 2961 * 2962 * The slabs with the least items are placed last. This results in them 2963 * being allocated from last, increasing the chance that the last objects 2964 * are freed in them. 2965 */ 2966 int kmem_cache_shrink(struct kmem_cache *s) 2967 { 2968 int node; 2969 int i; 2970 struct kmem_cache_node *n; 2971 struct page *page; 2972 struct page *t; 2973 int objects = oo_objects(s->max); 2974 struct list_head *slabs_by_inuse = 2975 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); 2976 unsigned long flags; 2977 2978 if (!slabs_by_inuse) 2979 return -ENOMEM; 2980 2981 flush_all(s); 2982 for_each_node_state(node, N_NORMAL_MEMORY) { 2983 n = get_node(s, node); 2984 2985 if (!n->nr_partial) 2986 continue; 2987 2988 for (i = 0; i < objects; i++) 2989 INIT_LIST_HEAD(slabs_by_inuse + i); 2990 2991 spin_lock_irqsave(&n->list_lock, flags); 2992 2993 /* 2994 * Build lists indexed by the items in use in each slab. 2995 * 2996 * Note that concurrent frees may occur while we hold the 2997 * list_lock. page->inuse here is the upper limit. 2998 */ 2999 list_for_each_entry_safe(page, t, &n->partial, lru) { 3000 if (!page->inuse && slab_trylock(page)) { 3001 /* 3002 * Must hold slab lock here because slab_free 3003 * may have freed the last object and be 3004 * waiting to release the slab. 3005 */ 3006 __remove_partial(n, page); 3007 slab_unlock(page); 3008 discard_slab(s, page); 3009 } else { 3010 list_move(&page->lru, 3011 slabs_by_inuse + page->inuse); 3012 } 3013 } 3014 3015 /* 3016 * Rebuild the partial list with the slabs filled up most 3017 * first and the least used slabs at the end. 3018 */ 3019 for (i = objects - 1; i >= 0; i--) 3020 list_splice(slabs_by_inuse + i, n->partial.prev); 3021 3022 spin_unlock_irqrestore(&n->list_lock, flags); 3023 } 3024 3025 kfree(slabs_by_inuse); 3026 return 0; 3027 } 3028 EXPORT_SYMBOL(kmem_cache_shrink); 3029 3030 #if defined(CONFIG_MEMORY_HOTPLUG) 3031 static int slab_mem_going_offline_callback(void *arg) 3032 { 3033 struct kmem_cache *s; 3034 3035 down_read(&slub_lock); 3036 list_for_each_entry(s, &slab_caches, list) 3037 kmem_cache_shrink(s); 3038 up_read(&slub_lock); 3039 3040 return 0; 3041 } 3042 3043 static void slab_mem_offline_callback(void *arg) 3044 { 3045 struct kmem_cache_node *n; 3046 struct kmem_cache *s; 3047 struct memory_notify *marg = arg; 3048 int offline_node; 3049 3050 offline_node = marg->status_change_nid; 3051 3052 /* 3053 * If the node still has available memory, we still need its 3054 * kmem_cache_node, so there is nothing to do. 3055 */ 3056 if (offline_node < 0) 3057 return; 3058 3059 down_read(&slub_lock); 3060 list_for_each_entry(s, &slab_caches, list) { 3061 n = get_node(s, offline_node); 3062 if (n) { 3063 /* 3064 * if n->nr_slabs > 0, slabs still exist on the node 3065 * that is going down. We were unable to free them, 3066 * and the offline_pages() function shouldn't call this 3067 * callback. So, we must fail. 3068 */ 3069 BUG_ON(slabs_node(s, offline_node)); 3070 3071 s->node[offline_node] = NULL; 3072 kmem_cache_free(kmem_cache_node, n); 3073 } 3074 } 3075 up_read(&slub_lock); 3076 } 3077 3078 static int slab_mem_going_online_callback(void *arg) 3079 { 3080 struct kmem_cache_node *n; 3081 struct kmem_cache *s; 3082 struct memory_notify *marg = arg; 3083 int nid = marg->status_change_nid; 3084 int ret = 0; 3085 3086 /* 3087 * If the node's memory is already available, then kmem_cache_node is 3088 * already created. Nothing to do.
3089 */ 3090 if (nid < 0) 3091 return 0; 3092 3093 /* 3094 * We are bringing a node online. No memory is available yet. We must 3095 * allocate a kmem_cache_node structure in order to bring the node 3096 * online. 3097 */ 3098 down_read(&slub_lock); 3099 list_for_each_entry(s, &slab_caches, list) { 3100 /* 3101 * XXX: kmem_cache_alloc_node will fallback to other nodes 3102 * since memory is not yet available from the node that 3103 * is brought up. 3104 */ 3105 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 3106 if (!n) { 3107 ret = -ENOMEM; 3108 goto out; 3109 } 3110 init_kmem_cache_node(n, s); 3111 s->node[nid] = n; 3112 } 3113 out: 3114 up_read(&slub_lock); 3115 return ret; 3116 } 3117 3118 static int slab_memory_callback(struct notifier_block *self, 3119 unsigned long action, void *arg) 3120 { 3121 int ret = 0; 3122 3123 switch (action) { 3124 case MEM_GOING_ONLINE: 3125 ret = slab_mem_going_online_callback(arg); 3126 break; 3127 case MEM_GOING_OFFLINE: 3128 ret = slab_mem_going_offline_callback(arg); 3129 break; 3130 case MEM_OFFLINE: 3131 case MEM_CANCEL_ONLINE: 3132 slab_mem_offline_callback(arg); 3133 break; 3134 case MEM_ONLINE: 3135 case MEM_CANCEL_OFFLINE: 3136 break; 3137 } 3138 if (ret) 3139 ret = notifier_from_errno(ret); 3140 else 3141 ret = NOTIFY_OK; 3142 return ret; 3143 } 3144 3145 #endif /* CONFIG_MEMORY_HOTPLUG */ 3146 3147 /******************************************************************** 3148 * Basic setup of slabs 3149 *******************************************************************/ 3150 3151 /* 3152 * Used for early kmem_cache structures that were allocated using 3153 * the page allocator 3154 */ 3155 3156 static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) 3157 { 3158 int node; 3159 3160 list_add(&s->list, &slab_caches); 3161 s->refcount = -1; 3162 3163 for_each_node_state(node, N_NORMAL_MEMORY) { 3164 struct kmem_cache_node *n = get_node(s, node); 3165 struct page *p; 3166 3167 if (n) { 3168 list_for_each_entry(p, &n->partial, lru) 3169 p->slab = s; 3170 3171 #ifdef CONFIG_SLUB_DEBUG 3172 list_for_each_entry(p, &n->full, lru) 3173 p->slab = s; 3174 #endif 3175 } 3176 } 3177 } 3178 3179 void __init kmem_cache_init(void) 3180 { 3181 int i; 3182 int caches = 0; 3183 struct kmem_cache *temp_kmem_cache; 3184 int order; 3185 struct kmem_cache *temp_kmem_cache_node; 3186 unsigned long kmalloc_size; 3187 3188 kmem_size = offsetof(struct kmem_cache, node) + 3189 nr_node_ids * sizeof(struct kmem_cache_node *); 3190 3191 /* Allocate two kmem_caches from the page allocator */ 3192 kmalloc_size = ALIGN(kmem_size, cache_line_size()); 3193 order = get_order(2 * kmalloc_size); 3194 kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order); 3195 3196 /* 3197 * Must first have the slab cache available for the allocations of the 3198 * struct kmem_cache_node's. There is special bootstrap code in 3199 * kmem_cache_open for slab_state == DOWN. 
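 *
 * (Both boot-time kmem_cache structures live in the single page-allocator
 * chunk obtained above; kmem_cache_node is simply placed kmalloc_size bytes
 * after kmem_cache, as the assignment below shows.)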
3200 */ 3201 kmem_cache_node = (void *)kmem_cache + kmalloc_size; 3202 3203 kmem_cache_open(kmem_cache_node, "kmem_cache_node", 3204 sizeof(struct kmem_cache_node), 3205 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3206 3207 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 3208 3209 /* Able to allocate the per node structures */ 3210 slab_state = PARTIAL; 3211 3212 temp_kmem_cache = kmem_cache; 3213 kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, 3214 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3215 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3216 memcpy(kmem_cache, temp_kmem_cache, kmem_size); 3217 3218 /* 3219 * Allocate kmem_cache_node properly from the kmem_cache slab. 3220 * kmem_cache_node is separately allocated so no need to 3221 * update any list pointers. 3222 */ 3223 temp_kmem_cache_node = kmem_cache_node; 3224 3225 kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3226 memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size); 3227 3228 kmem_cache_bootstrap_fixup(kmem_cache_node); 3229 3230 caches++; 3231 kmem_cache_bootstrap_fixup(kmem_cache); 3232 caches++; 3233 /* Free temporary boot structure */ 3234 free_pages((unsigned long)temp_kmem_cache, order); 3235 3236 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 3237 3238 /* 3239 * Patch up the size_index table if we have strange large alignment 3240 * requirements for the kmalloc array. This is only the case for 3241 * MIPS it seems. The standard arches will not generate any code here. 3242 * 3243 * Largest permitted alignment is 256 bytes due to the way we 3244 * handle the index determination for the smaller caches. 3245 * 3246 * Make sure that nothing crazy happens if someone starts tinkering 3247 * around with ARCH_KMALLOC_MINALIGN 3248 */ 3249 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || 3250 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); 3251 3252 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) { 3253 int elem = size_index_elem(i); 3254 if (elem >= ARRAY_SIZE(size_index)) 3255 break; 3256 size_index[elem] = KMALLOC_SHIFT_LOW; 3257 } 3258 3259 if (KMALLOC_MIN_SIZE == 64) { 3260 /* 3261 * The 96 byte size cache is not used if the alignment 3262 * is 64 byte. 3263 */ 3264 for (i = 64 + 8; i <= 96; i += 8) 3265 size_index[size_index_elem(i)] = 7; 3266 } else if (KMALLOC_MIN_SIZE == 128) { 3267 /* 3268 * The 192 byte sized cache is not used if the alignment 3269 * is 128 byte. Redirect kmalloc to use the 256 byte cache 3270 * instead. 
3271 */ 3272 for (i = 128 + 8; i <= 192; i += 8) 3273 size_index[size_index_elem(i)] = 8; 3274 } 3275 3276 /* Caches that are not of the two-to-the-power-of size */ 3277 if (KMALLOC_MIN_SIZE <= 32) { 3278 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0); 3279 caches++; 3280 } 3281 3282 if (KMALLOC_MIN_SIZE <= 64) { 3283 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0); 3284 caches++; 3285 } 3286 3287 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3288 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0); 3289 caches++; 3290 } 3291 3292 slab_state = UP; 3293 3294 /* Provide the correct kmalloc names now that the caches are up */ 3295 if (KMALLOC_MIN_SIZE <= 32) { 3296 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT); 3297 BUG_ON(!kmalloc_caches[1]->name); 3298 } 3299 3300 if (KMALLOC_MIN_SIZE <= 64) { 3301 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT); 3302 BUG_ON(!kmalloc_caches[2]->name); 3303 } 3304 3305 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3306 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); 3307 3308 BUG_ON(!s); 3309 kmalloc_caches[i]->name = s; 3310 } 3311 3312 #ifdef CONFIG_SMP 3313 register_cpu_notifier(&slab_notifier); 3314 #endif 3315 3316 #ifdef CONFIG_ZONE_DMA 3317 for (i = 0; i < SLUB_PAGE_SHIFT; i++) { 3318 struct kmem_cache *s = kmalloc_caches[i]; 3319 3320 if (s && s->size) { 3321 char *name = kasprintf(GFP_NOWAIT, 3322 "dma-kmalloc-%d", s->objsize); 3323 3324 BUG_ON(!name); 3325 kmalloc_dma_caches[i] = create_kmalloc_cache(name, 3326 s->objsize, SLAB_CACHE_DMA); 3327 } 3328 } 3329 #endif 3330 printk(KERN_INFO 3331 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3332 " CPUs=%d, Nodes=%d\n", 3333 caches, cache_line_size(), 3334 slub_min_order, slub_max_order, slub_min_objects, 3335 nr_cpu_ids, nr_node_ids); 3336 } 3337 3338 void __init kmem_cache_init_late(void) 3339 { 3340 } 3341 3342 /* 3343 * Find a mergeable slab cache 3344 */ 3345 static int slab_unmergeable(struct kmem_cache *s) 3346 { 3347 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3348 return 1; 3349 3350 if (s->ctor) 3351 return 1; 3352 3353 /* 3354 * We may have set a slab to be unmergeable during bootstrap. 3355 */ 3356 if (s->refcount < 0) 3357 return 1; 3358 3359 return 0; 3360 } 3361 3362 static struct kmem_cache *find_mergeable(size_t size, 3363 size_t align, unsigned long flags, const char *name, 3364 void (*ctor)(void *)) 3365 { 3366 struct kmem_cache *s; 3367 3368 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 3369 return NULL; 3370 3371 if (ctor) 3372 return NULL; 3373 3374 size = ALIGN(size, sizeof(void *)); 3375 align = calculate_alignment(flags, align, size); 3376 size = ALIGN(size, align); 3377 flags = kmem_cache_flags(size, flags, name, NULL); 3378 3379 list_for_each_entry(s, &slab_caches, list) { 3380 if (slab_unmergeable(s)) 3381 continue; 3382 3383 if (size > s->size) 3384 continue; 3385 3386 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3387 continue; 3388 /* 3389 * Check if alignment is compatible. 
3390 * Courtesy of Adrian Drzewiecki 3391 */ 3392 if ((s->size & ~(align - 1)) != s->size) 3393 continue; 3394 3395 if (s->size - size >= sizeof(void *)) 3396 continue; 3397 3398 return s; 3399 } 3400 return NULL; 3401 } 3402 3403 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 3404 size_t align, unsigned long flags, void (*ctor)(void *)) 3405 { 3406 struct kmem_cache *s; 3407 char *n; 3408 3409 if (WARN_ON(!name)) 3410 return NULL; 3411 3412 down_write(&slub_lock); 3413 s = find_mergeable(size, align, flags, name, ctor); 3414 if (s) { 3415 s->refcount++; 3416 /* 3417 * Adjust the object sizes so that we clear 3418 * the complete object on kzalloc. 3419 */ 3420 s->objsize = max(s->objsize, (int)size); 3421 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 3422 3423 if (sysfs_slab_alias(s, name)) { 3424 s->refcount--; 3425 goto err; 3426 } 3427 up_write(&slub_lock); 3428 return s; 3429 } 3430 3431 n = kstrdup(name, GFP_KERNEL); 3432 if (!n) 3433 goto err; 3434 3435 s = kmalloc(kmem_size, GFP_KERNEL); 3436 if (s) { 3437 if (kmem_cache_open(s, n, 3438 size, align, flags, ctor)) { 3439 list_add(&s->list, &slab_caches); 3440 if (sysfs_slab_add(s)) { 3441 list_del(&s->list); 3442 kfree(n); 3443 kfree(s); 3444 goto err; 3445 } 3446 up_write(&slub_lock); 3447 return s; 3448 } 3449 kfree(n); 3450 kfree(s); 3451 } 3452 err: 3453 up_write(&slub_lock); 3454 3455 if (flags & SLAB_PANIC) 3456 panic("Cannot create slabcache %s\n", name); 3457 else 3458 s = NULL; 3459 return s; 3460 } 3461 EXPORT_SYMBOL(kmem_cache_create); 3462 3463 #ifdef CONFIG_SMP 3464 /* 3465 * Use the cpu notifier to insure that the cpu slabs are flushed when 3466 * necessary. 3467 */ 3468 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3469 unsigned long action, void *hcpu) 3470 { 3471 long cpu = (long)hcpu; 3472 struct kmem_cache *s; 3473 unsigned long flags; 3474 3475 switch (action) { 3476 case CPU_UP_CANCELED: 3477 case CPU_UP_CANCELED_FROZEN: 3478 case CPU_DEAD: 3479 case CPU_DEAD_FROZEN: 3480 down_read(&slub_lock); 3481 list_for_each_entry(s, &slab_caches, list) { 3482 local_irq_save(flags); 3483 __flush_cpu_slab(s, cpu); 3484 local_irq_restore(flags); 3485 } 3486 up_read(&slub_lock); 3487 break; 3488 default: 3489 break; 3490 } 3491 return NOTIFY_OK; 3492 } 3493 3494 static struct notifier_block __cpuinitdata slab_notifier = { 3495 .notifier_call = slab_cpuup_callback 3496 }; 3497 3498 #endif 3499 3500 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 3501 { 3502 struct kmem_cache *s; 3503 void *ret; 3504 3505 if (unlikely(size > SLUB_MAX_SIZE)) 3506 return kmalloc_large(size, gfpflags); 3507 3508 s = get_slab(size, gfpflags); 3509 3510 if (unlikely(ZERO_OR_NULL_PTR(s))) 3511 return s; 3512 3513 ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); 3514 3515 /* Honor the call site pointer we received. 
*/ 3516 trace_kmalloc(caller, ret, size, s->size, gfpflags); 3517 3518 return ret; 3519 } 3520 3521 #ifdef CONFIG_NUMA 3522 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 3523 int node, unsigned long caller) 3524 { 3525 struct kmem_cache *s; 3526 void *ret; 3527 3528 if (unlikely(size > SLUB_MAX_SIZE)) { 3529 ret = kmalloc_large_node(size, gfpflags, node); 3530 3531 trace_kmalloc_node(caller, ret, 3532 size, PAGE_SIZE << get_order(size), 3533 gfpflags, node); 3534 3535 return ret; 3536 } 3537 3538 s = get_slab(size, gfpflags); 3539 3540 if (unlikely(ZERO_OR_NULL_PTR(s))) 3541 return s; 3542 3543 ret = slab_alloc(s, gfpflags, node, caller); 3544 3545 /* Honor the call site pointer we received. */ 3546 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 3547 3548 return ret; 3549 } 3550 #endif 3551 3552 #ifdef CONFIG_SYSFS 3553 static int count_inuse(struct page *page) 3554 { 3555 return page->inuse; 3556 } 3557 3558 static int count_total(struct page *page) 3559 { 3560 return page->objects; 3561 } 3562 #endif 3563 3564 #ifdef CONFIG_SLUB_DEBUG 3565 static int validate_slab(struct kmem_cache *s, struct page *page, 3566 unsigned long *map) 3567 { 3568 void *p; 3569 void *addr = page_address(page); 3570 3571 if (!check_slab(s, page) || 3572 !on_freelist(s, page, NULL)) 3573 return 0; 3574 3575 /* Now we know that a valid freelist exists */ 3576 bitmap_zero(map, page->objects); 3577 3578 get_map(s, page, map); 3579 for_each_object(p, s, addr, page->objects) { 3580 if (test_bit(slab_index(p, s, addr), map)) 3581 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) 3582 return 0; 3583 } 3584 3585 for_each_object(p, s, addr, page->objects) 3586 if (!test_bit(slab_index(p, s, addr), map)) 3587 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) 3588 return 0; 3589 return 1; 3590 } 3591 3592 static void validate_slab_slab(struct kmem_cache *s, struct page *page, 3593 unsigned long *map) 3594 { 3595 if (slab_trylock(page)) { 3596 validate_slab(s, page, map); 3597 slab_unlock(page); 3598 } else 3599 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", 3600 s->name, page); 3601 } 3602 3603 static int validate_slab_node(struct kmem_cache *s, 3604 struct kmem_cache_node *n, unsigned long *map) 3605 { 3606 unsigned long count = 0; 3607 struct page *page; 3608 unsigned long flags; 3609 3610 spin_lock_irqsave(&n->list_lock, flags); 3611 3612 list_for_each_entry(page, &n->partial, lru) { 3613 validate_slab_slab(s, page, map); 3614 count++; 3615 } 3616 if (count != n->nr_partial) 3617 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 3618 "counter=%ld\n", s->name, count, n->nr_partial); 3619 3620 if (!(s->flags & SLAB_STORE_USER)) 3621 goto out; 3622 3623 list_for_each_entry(page, &n->full, lru) { 3624 validate_slab_slab(s, page, map); 3625 count++; 3626 } 3627 if (count != atomic_long_read(&n->nr_slabs)) 3628 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 3629 "counter=%ld\n", s->name, count, 3630 atomic_long_read(&n->nr_slabs)); 3631 3632 out: 3633 spin_unlock_irqrestore(&n->list_lock, flags); 3634 return count; 3635 } 3636 3637 static long validate_slab_cache(struct kmem_cache *s) 3638 { 3639 int node; 3640 unsigned long count = 0; 3641 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3642 sizeof(unsigned long), GFP_KERNEL); 3643 3644 if (!map) 3645 return -ENOMEM; 3646 3647 flush_all(s); 3648 for_each_node_state(node, N_NORMAL_MEMORY) { 3649 struct kmem_cache_node *n = get_node(s, node); 3650 3651 count += validate_slab_node(s, n, map); 3652 } 3653 
kfree(map); 3654 return count; 3655 } 3656 /* 3657 * Generate lists of code addresses where slabcache objects are allocated 3658 * and freed. 3659 */ 3660 3661 struct location { 3662 unsigned long count; 3663 unsigned long addr; 3664 long long sum_time; 3665 long min_time; 3666 long max_time; 3667 long min_pid; 3668 long max_pid; 3669 DECLARE_BITMAP(cpus, NR_CPUS); 3670 nodemask_t nodes; 3671 }; 3672 3673 struct loc_track { 3674 unsigned long max; 3675 unsigned long count; 3676 struct location *loc; 3677 }; 3678 3679 static void free_loc_track(struct loc_track *t) 3680 { 3681 if (t->max) 3682 free_pages((unsigned long)t->loc, 3683 get_order(sizeof(struct location) * t->max)); 3684 } 3685 3686 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 3687 { 3688 struct location *l; 3689 int order; 3690 3691 order = get_order(sizeof(struct location) * max); 3692 3693 l = (void *)__get_free_pages(flags, order); 3694 if (!l) 3695 return 0; 3696 3697 if (t->count) { 3698 memcpy(l, t->loc, sizeof(struct location) * t->count); 3699 free_loc_track(t); 3700 } 3701 t->max = max; 3702 t->loc = l; 3703 return 1; 3704 } 3705 3706 static int add_location(struct loc_track *t, struct kmem_cache *s, 3707 const struct track *track) 3708 { 3709 long start, end, pos; 3710 struct location *l; 3711 unsigned long caddr; 3712 unsigned long age = jiffies - track->when; 3713 3714 start = -1; 3715 end = t->count; 3716 3717 for ( ; ; ) { 3718 pos = start + (end - start + 1) / 2; 3719 3720 /* 3721 * There is nothing at "end". If we end up there 3722 * we need to add something to before end. 3723 */ 3724 if (pos == end) 3725 break; 3726 3727 caddr = t->loc[pos].addr; 3728 if (track->addr == caddr) { 3729 3730 l = &t->loc[pos]; 3731 l->count++; 3732 if (track->when) { 3733 l->sum_time += age; 3734 if (age < l->min_time) 3735 l->min_time = age; 3736 if (age > l->max_time) 3737 l->max_time = age; 3738 3739 if (track->pid < l->min_pid) 3740 l->min_pid = track->pid; 3741 if (track->pid > l->max_pid) 3742 l->max_pid = track->pid; 3743 3744 cpumask_set_cpu(track->cpu, 3745 to_cpumask(l->cpus)); 3746 } 3747 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3748 return 1; 3749 } 3750 3751 if (track->addr < caddr) 3752 end = pos; 3753 else 3754 start = pos; 3755 } 3756 3757 /* 3758 * Not found. Insert new tracking element. 
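 * (pos is where the binary search above stopped, so the new element is
 * spliced in at that index to keep t->loc sorted by call site address.)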
3759 */ 3760 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 3761 return 0; 3762 3763 l = t->loc + pos; 3764 if (pos < t->count) 3765 memmove(l + 1, l, 3766 (t->count - pos) * sizeof(struct location)); 3767 t->count++; 3768 l->count = 1; 3769 l->addr = track->addr; 3770 l->sum_time = age; 3771 l->min_time = age; 3772 l->max_time = age; 3773 l->min_pid = track->pid; 3774 l->max_pid = track->pid; 3775 cpumask_clear(to_cpumask(l->cpus)); 3776 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 3777 nodes_clear(l->nodes); 3778 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3779 return 1; 3780 } 3781 3782 static void process_slab(struct loc_track *t, struct kmem_cache *s, 3783 struct page *page, enum track_item alloc, 3784 unsigned long *map) 3785 { 3786 void *addr = page_address(page); 3787 void *p; 3788 3789 bitmap_zero(map, page->objects); 3790 get_map(s, page, map); 3791 3792 for_each_object(p, s, addr, page->objects) 3793 if (!test_bit(slab_index(p, s, addr), map)) 3794 add_location(t, s, get_track(s, p, alloc)); 3795 } 3796 3797 static int list_locations(struct kmem_cache *s, char *buf, 3798 enum track_item alloc) 3799 { 3800 int len = 0; 3801 unsigned long i; 3802 struct loc_track t = { 0, 0, NULL }; 3803 int node; 3804 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3805 sizeof(unsigned long), GFP_KERNEL); 3806 3807 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 3808 GFP_TEMPORARY)) { 3809 kfree(map); 3810 return sprintf(buf, "Out of memory\n"); 3811 } 3812 /* Push back cpu slabs */ 3813 flush_all(s); 3814 3815 for_each_node_state(node, N_NORMAL_MEMORY) { 3816 struct kmem_cache_node *n = get_node(s, node); 3817 unsigned long flags; 3818 struct page *page; 3819 3820 if (!atomic_long_read(&n->nr_slabs)) 3821 continue; 3822 3823 spin_lock_irqsave(&n->list_lock, flags); 3824 list_for_each_entry(page, &n->partial, lru) 3825 process_slab(&t, s, page, alloc, map); 3826 list_for_each_entry(page, &n->full, lru) 3827 process_slab(&t, s, page, alloc, map); 3828 spin_unlock_irqrestore(&n->list_lock, flags); 3829 } 3830 3831 for (i = 0; i < t.count; i++) { 3832 struct location *l = &t.loc[i]; 3833 3834 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) 3835 break; 3836 len += sprintf(buf + len, "%7ld ", l->count); 3837 3838 if (l->addr) 3839 len += sprintf(buf + len, "%pS", (void *)l->addr); 3840 else 3841 len += sprintf(buf + len, "<not-available>"); 3842 3843 if (l->sum_time != l->min_time) { 3844 len += sprintf(buf + len, " age=%ld/%ld/%ld", 3845 l->min_time, 3846 (long)div_u64(l->sum_time, l->count), 3847 l->max_time); 3848 } else 3849 len += sprintf(buf + len, " age=%ld", 3850 l->min_time); 3851 3852 if (l->min_pid != l->max_pid) 3853 len += sprintf(buf + len, " pid=%ld-%ld", 3854 l->min_pid, l->max_pid); 3855 else 3856 len += sprintf(buf + len, " pid=%ld", 3857 l->min_pid); 3858 3859 if (num_online_cpus() > 1 && 3860 !cpumask_empty(to_cpumask(l->cpus)) && 3861 len < PAGE_SIZE - 60) { 3862 len += sprintf(buf + len, " cpus="); 3863 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3864 to_cpumask(l->cpus)); 3865 } 3866 3867 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && 3868 len < PAGE_SIZE - 60) { 3869 len += sprintf(buf + len, " nodes="); 3870 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3871 l->nodes); 3872 } 3873 3874 len += sprintf(buf + len, "\n"); 3875 } 3876 3877 free_loc_track(&t); 3878 kfree(map); 3879 if (!t.count) 3880 len += sprintf(buf, "No data\n"); 3881 return len; 3882 } 3883 #endif 3884 
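/*
 * Illustrative usage of the debug facilities above (a sketch, not part of
 * the build): with CONFIG_SLUB_DEBUG and a cache booted with slub_debug=U
 * (SLAB_STORE_USER), the data gathered by validate_slab_cache() and
 * list_locations() can be read back through the sysfs attributes defined
 * further down, e.g.:
 *
 *	echo 1 > /sys/kernel/slab/kmalloc-128/validate
 *	cat /sys/kernel/slab/kmalloc-128/alloc_calls
 */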
3885 #ifdef SLUB_RESILIENCY_TEST 3886 static void resiliency_test(void) 3887 { 3888 u8 *p; 3889 3890 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10); 3891 3892 printk(KERN_ERR "SLUB resiliency testing\n"); 3893 printk(KERN_ERR "-----------------------\n"); 3894 printk(KERN_ERR "A. Corruption after allocation\n"); 3895 3896 p = kzalloc(16, GFP_KERNEL); 3897 p[16] = 0x12; 3898 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" 3899 " 0x12->0x%p\n\n", p + 16); 3900 3901 validate_slab_cache(kmalloc_caches[4]); 3902 3903 /* Hmmm... The next two are dangerous */ 3904 p = kzalloc(32, GFP_KERNEL); 3905 p[32 + sizeof(void *)] = 0x34; 3906 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" 3907 " 0x34 -> -0x%p\n", p); 3908 printk(KERN_ERR 3909 "If allocated object is overwritten then not detectable\n\n"); 3910 3911 validate_slab_cache(kmalloc_caches[5]); 3912 p = kzalloc(64, GFP_KERNEL); 3913 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 3914 *p = 0x56; 3915 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 3916 p); 3917 printk(KERN_ERR 3918 "If allocated object is overwritten then not detectable\n\n"); 3919 validate_slab_cache(kmalloc_caches[6]); 3920 3921 printk(KERN_ERR "\nB. Corruption after free\n"); 3922 p = kzalloc(128, GFP_KERNEL); 3923 kfree(p); 3924 *p = 0x78; 3925 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 3926 validate_slab_cache(kmalloc_caches[7]); 3927 3928 p = kzalloc(256, GFP_KERNEL); 3929 kfree(p); 3930 p[50] = 0x9a; 3931 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", 3932 p); 3933 validate_slab_cache(kmalloc_caches[8]); 3934 3935 p = kzalloc(512, GFP_KERNEL); 3936 kfree(p); 3937 p[512] = 0xab; 3938 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 3939 validate_slab_cache(kmalloc_caches[9]); 3940 } 3941 #else 3942 #ifdef CONFIG_SYSFS 3943 static void resiliency_test(void) {}; 3944 #endif 3945 #endif 3946 3947 #ifdef CONFIG_SYSFS 3948 enum slab_stat_type { 3949 SL_ALL, /* All slabs */ 3950 SL_PARTIAL, /* Only partially allocated slabs */ 3951 SL_CPU, /* Only slabs used for cpu caches */ 3952 SL_OBJECTS, /* Determine allocated objects not slabs */ 3953 SL_TOTAL /* Determine object capacity not slabs */ 3954 }; 3955 3956 #define SO_ALL (1 << SL_ALL) 3957 #define SO_PARTIAL (1 << SL_PARTIAL) 3958 #define SO_CPU (1 << SL_CPU) 3959 #define SO_OBJECTS (1 << SL_OBJECTS) 3960 #define SO_TOTAL (1 << SL_TOTAL) 3961 3962 static ssize_t show_slab_objects(struct kmem_cache *s, 3963 char *buf, unsigned long flags) 3964 { 3965 unsigned long total = 0; 3966 int node; 3967 int x; 3968 unsigned long *nodes; 3969 unsigned long *per_cpu; 3970 3971 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 3972 if (!nodes) 3973 return -ENOMEM; 3974 per_cpu = nodes + nr_node_ids; 3975 3976 if (flags & SO_CPU) { 3977 int cpu; 3978 3979 for_each_possible_cpu(cpu) { 3980 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3981 3982 if (!c || c->node < 0) 3983 continue; 3984 3985 if (c->page) { 3986 if (flags & SO_TOTAL) 3987 x = c->page->objects; 3988 else if (flags & SO_OBJECTS) 3989 x = c->page->inuse; 3990 else 3991 x = 1; 3992 3993 total += x; 3994 nodes[c->node] += x; 3995 } 3996 per_cpu[c->node]++; 3997 } 3998 } 3999 4000 lock_memory_hotplug(); 4001 #ifdef CONFIG_SLUB_DEBUG 4002 if (flags & SO_ALL) { 4003 for_each_node_state(node, N_NORMAL_MEMORY) { 4004 struct kmem_cache_node *n = get_node(s, node); 4005 4006 if (flags & SO_TOTAL) 4007 x = 
atomic_long_read(&n->total_objects); 4008 else if (flags & SO_OBJECTS) 4009 x = atomic_long_read(&n->total_objects) - 4010 count_partial(n, count_free); 4011 4012 else 4013 x = atomic_long_read(&n->nr_slabs); 4014 total += x; 4015 nodes[node] += x; 4016 } 4017 4018 } else 4019 #endif 4020 if (flags & SO_PARTIAL) { 4021 for_each_node_state(node, N_NORMAL_MEMORY) { 4022 struct kmem_cache_node *n = get_node(s, node); 4023 4024 if (flags & SO_TOTAL) 4025 x = count_partial(n, count_total); 4026 else if (flags & SO_OBJECTS) 4027 x = count_partial(n, count_inuse); 4028 else 4029 x = n->nr_partial; 4030 total += x; 4031 nodes[node] += x; 4032 } 4033 } 4034 x = sprintf(buf, "%lu", total); 4035 #ifdef CONFIG_NUMA 4036 for_each_node_state(node, N_NORMAL_MEMORY) 4037 if (nodes[node]) 4038 x += sprintf(buf + x, " N%d=%lu", 4039 node, nodes[node]); 4040 #endif 4041 unlock_memory_hotplug(); 4042 kfree(nodes); 4043 return x + sprintf(buf + x, "\n"); 4044 } 4045 4046 #ifdef CONFIG_SLUB_DEBUG 4047 static int any_slab_objects(struct kmem_cache *s) 4048 { 4049 int node; 4050 4051 for_each_online_node(node) { 4052 struct kmem_cache_node *n = get_node(s, node); 4053 4054 if (!n) 4055 continue; 4056 4057 if (atomic_long_read(&n->total_objects)) 4058 return 1; 4059 } 4060 return 0; 4061 } 4062 #endif 4063 4064 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 4065 #define to_slab(n) container_of(n, struct kmem_cache, kobj); 4066 4067 struct slab_attribute { 4068 struct attribute attr; 4069 ssize_t (*show)(struct kmem_cache *s, char *buf); 4070 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 4071 }; 4072 4073 #define SLAB_ATTR_RO(_name) \ 4074 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 4075 4076 #define SLAB_ATTR(_name) \ 4077 static struct slab_attribute _name##_attr = \ 4078 __ATTR(_name, 0644, _name##_show, _name##_store) 4079 4080 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 4081 { 4082 return sprintf(buf, "%d\n", s->size); 4083 } 4084 SLAB_ATTR_RO(slab_size); 4085 4086 static ssize_t align_show(struct kmem_cache *s, char *buf) 4087 { 4088 return sprintf(buf, "%d\n", s->align); 4089 } 4090 SLAB_ATTR_RO(align); 4091 4092 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 4093 { 4094 return sprintf(buf, "%d\n", s->objsize); 4095 } 4096 SLAB_ATTR_RO(object_size); 4097 4098 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 4099 { 4100 return sprintf(buf, "%d\n", oo_objects(s->oo)); 4101 } 4102 SLAB_ATTR_RO(objs_per_slab); 4103 4104 static ssize_t order_store(struct kmem_cache *s, 4105 const char *buf, size_t length) 4106 { 4107 unsigned long order; 4108 int err; 4109 4110 err = strict_strtoul(buf, 10, &order); 4111 if (err) 4112 return err; 4113 4114 if (order > slub_max_order || order < slub_min_order) 4115 return -EINVAL; 4116 4117 calculate_sizes(s, order); 4118 return length; 4119 } 4120 4121 static ssize_t order_show(struct kmem_cache *s, char *buf) 4122 { 4123 return sprintf(buf, "%d\n", oo_order(s->oo)); 4124 } 4125 SLAB_ATTR(order); 4126 4127 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 4128 { 4129 return sprintf(buf, "%lu\n", s->min_partial); 4130 } 4131 4132 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 4133 size_t length) 4134 { 4135 unsigned long min; 4136 int err; 4137 4138 err = strict_strtoul(buf, 10, &min); 4139 if (err) 4140 return err; 4141 4142 set_min_partial(s, min); 4143 return length; 4144 } 4145 SLAB_ATTR(min_partial); 4146 4147 static 
ssize_t ctor_show(struct kmem_cache *s, char *buf) 4148 { 4149 if (!s->ctor) 4150 return 0; 4151 return sprintf(buf, "%pS\n", s->ctor); 4152 } 4153 SLAB_ATTR_RO(ctor); 4154 4155 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 4156 { 4157 return sprintf(buf, "%d\n", s->refcount - 1); 4158 } 4159 SLAB_ATTR_RO(aliases); 4160 4161 static ssize_t partial_show(struct kmem_cache *s, char *buf) 4162 { 4163 return show_slab_objects(s, buf, SO_PARTIAL); 4164 } 4165 SLAB_ATTR_RO(partial); 4166 4167 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 4168 { 4169 return show_slab_objects(s, buf, SO_CPU); 4170 } 4171 SLAB_ATTR_RO(cpu_slabs); 4172 4173 static ssize_t objects_show(struct kmem_cache *s, char *buf) 4174 { 4175 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 4176 } 4177 SLAB_ATTR_RO(objects); 4178 4179 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 4180 { 4181 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 4182 } 4183 SLAB_ATTR_RO(objects_partial); 4184 4185 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 4186 { 4187 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 4188 } 4189 4190 static ssize_t reclaim_account_store(struct kmem_cache *s, 4191 const char *buf, size_t length) 4192 { 4193 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 4194 if (buf[0] == '1') 4195 s->flags |= SLAB_RECLAIM_ACCOUNT; 4196 return length; 4197 } 4198 SLAB_ATTR(reclaim_account); 4199 4200 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 4201 { 4202 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 4203 } 4204 SLAB_ATTR_RO(hwcache_align); 4205 4206 #ifdef CONFIG_ZONE_DMA 4207 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 4208 { 4209 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 4210 } 4211 SLAB_ATTR_RO(cache_dma); 4212 #endif 4213 4214 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 4215 { 4216 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 4217 } 4218 SLAB_ATTR_RO(destroy_by_rcu); 4219 4220 static ssize_t reserved_show(struct kmem_cache *s, char *buf) 4221 { 4222 return sprintf(buf, "%d\n", s->reserved); 4223 } 4224 SLAB_ATTR_RO(reserved); 4225 4226 #ifdef CONFIG_SLUB_DEBUG 4227 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 4228 { 4229 return show_slab_objects(s, buf, SO_ALL); 4230 } 4231 SLAB_ATTR_RO(slabs); 4232 4233 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 4234 { 4235 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 4236 } 4237 SLAB_ATTR_RO(total_objects); 4238 4239 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 4240 { 4241 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 4242 } 4243 4244 static ssize_t sanity_checks_store(struct kmem_cache *s, 4245 const char *buf, size_t length) 4246 { 4247 s->flags &= ~SLAB_DEBUG_FREE; 4248 if (buf[0] == '1') 4249 s->flags |= SLAB_DEBUG_FREE; 4250 return length; 4251 } 4252 SLAB_ATTR(sanity_checks); 4253 4254 static ssize_t trace_show(struct kmem_cache *s, char *buf) 4255 { 4256 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 4257 } 4258 4259 static ssize_t trace_store(struct kmem_cache *s, const char *buf, 4260 size_t length) 4261 { 4262 s->flags &= ~SLAB_TRACE; 4263 if (buf[0] == '1') 4264 s->flags |= SLAB_TRACE; 4265 return length; 4266 } 4267 SLAB_ATTR(trace); 4268 4269 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 4270 { 4271 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 4272 } 
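/*
 * For reference: each attribute in this block is generated by the
 * SLAB_ATTR()/SLAB_ATTR_RO() macros defined above and ends up as a file
 * in the per-cache sysfs directory once it is listed in slab_attrs[].
 * Hand-expanded for illustration (this is what the preprocessor produces,
 * not additional code), SLAB_ATTR_RO(aliases) becomes
 *
 *	static struct slab_attribute aliases_attr = __ATTR_RO(aliases);
 *
 * and SLAB_ATTR(trace) becomes
 *
 *	static struct slab_attribute trace_attr =
 *		__ATTR(trace, 0644, trace_show, trace_store);
 *
 * so a cache registered below /sys/kernel/slab (e.g. a "kmalloc-64" cache,
 * name used here only as an example) can be inspected and, for writable
 * attributes, tuned through files such as /sys/kernel/slab/kmalloc-64/trace.
 */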
4273 4274 static ssize_t red_zone_store(struct kmem_cache *s, 4275 const char *buf, size_t length) 4276 { 4277 if (any_slab_objects(s)) 4278 return -EBUSY; 4279 4280 s->flags &= ~SLAB_RED_ZONE; 4281 if (buf[0] == '1') 4282 s->flags |= SLAB_RED_ZONE; 4283 calculate_sizes(s, -1); 4284 return length; 4285 } 4286 SLAB_ATTR(red_zone); 4287 4288 static ssize_t poison_show(struct kmem_cache *s, char *buf) 4289 { 4290 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 4291 } 4292 4293 static ssize_t poison_store(struct kmem_cache *s, 4294 const char *buf, size_t length) 4295 { 4296 if (any_slab_objects(s)) 4297 return -EBUSY; 4298 4299 s->flags &= ~SLAB_POISON; 4300 if (buf[0] == '1') 4301 s->flags |= SLAB_POISON; 4302 calculate_sizes(s, -1); 4303 return length; 4304 } 4305 SLAB_ATTR(poison); 4306 4307 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 4308 { 4309 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 4310 } 4311 4312 static ssize_t store_user_store(struct kmem_cache *s, 4313 const char *buf, size_t length) 4314 { 4315 if (any_slab_objects(s)) 4316 return -EBUSY; 4317 4318 s->flags &= ~SLAB_STORE_USER; 4319 if (buf[0] == '1') 4320 s->flags |= SLAB_STORE_USER; 4321 calculate_sizes(s, -1); 4322 return length; 4323 } 4324 SLAB_ATTR(store_user); 4325 4326 static ssize_t validate_show(struct kmem_cache *s, char *buf) 4327 { 4328 return 0; 4329 } 4330 4331 static ssize_t validate_store(struct kmem_cache *s, 4332 const char *buf, size_t length) 4333 { 4334 int ret = -EINVAL; 4335 4336 if (buf[0] == '1') { 4337 ret = validate_slab_cache(s); 4338 if (ret >= 0) 4339 ret = length; 4340 } 4341 return ret; 4342 } 4343 SLAB_ATTR(validate); 4344 4345 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 4346 { 4347 if (!(s->flags & SLAB_STORE_USER)) 4348 return -ENOSYS; 4349 return list_locations(s, buf, TRACK_ALLOC); 4350 } 4351 SLAB_ATTR_RO(alloc_calls); 4352 4353 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 4354 { 4355 if (!(s->flags & SLAB_STORE_USER)) 4356 return -ENOSYS; 4357 return list_locations(s, buf, TRACK_FREE); 4358 } 4359 SLAB_ATTR_RO(free_calls); 4360 #endif /* CONFIG_SLUB_DEBUG */ 4361 4362 #ifdef CONFIG_FAILSLAB 4363 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 4364 { 4365 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 4366 } 4367 4368 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 4369 size_t length) 4370 { 4371 s->flags &= ~SLAB_FAILSLAB; 4372 if (buf[0] == '1') 4373 s->flags |= SLAB_FAILSLAB; 4374 return length; 4375 } 4376 SLAB_ATTR(failslab); 4377 #endif 4378 4379 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4380 { 4381 return 0; 4382 } 4383 4384 static ssize_t shrink_store(struct kmem_cache *s, 4385 const char *buf, size_t length) 4386 { 4387 if (buf[0] == '1') { 4388 int rc = kmem_cache_shrink(s); 4389 4390 if (rc) 4391 return rc; 4392 } else 4393 return -EINVAL; 4394 return length; 4395 } 4396 SLAB_ATTR(shrink); 4397 4398 #ifdef CONFIG_NUMA 4399 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4400 { 4401 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 4402 } 4403 4404 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 4405 const char *buf, size_t length) 4406 { 4407 unsigned long ratio; 4408 int err; 4409 4410 err = strict_strtoul(buf, 10, &ratio); 4411 if (err) 4412 return err; 4413 4414 if (ratio <= 100) 4415 s->remote_node_defrag_ratio = ratio * 10; 4416 4417 return length; 4418 } 4419 
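/*
 * Note: remote_node_defrag_ratio is kept internally scaled by 10 (0-1000);
 * the _show routine divides by 10 and the _store routine multiplies by 10,
 * so the sysfs file reads and accepts a plain 0-100 percentage. Values
 * above 100 are silently ignored (the write still returns length).
 */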
SLAB_ATTR(remote_node_defrag_ratio); 4420 #endif 4421 4422 #ifdef CONFIG_SLUB_STATS 4423 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 4424 { 4425 unsigned long sum = 0; 4426 int cpu; 4427 int len; 4428 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 4429 4430 if (!data) 4431 return -ENOMEM; 4432 4433 for_each_online_cpu(cpu) { 4434 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 4435 4436 data[cpu] = x; 4437 sum += x; 4438 } 4439 4440 len = sprintf(buf, "%lu", sum); 4441 4442 #ifdef CONFIG_SMP 4443 for_each_online_cpu(cpu) { 4444 if (data[cpu] && len < PAGE_SIZE - 20) 4445 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 4446 } 4447 #endif 4448 kfree(data); 4449 return len + sprintf(buf + len, "\n"); 4450 } 4451 4452 static void clear_stat(struct kmem_cache *s, enum stat_item si) 4453 { 4454 int cpu; 4455 4456 for_each_online_cpu(cpu) 4457 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 4458 } 4459 4460 #define STAT_ATTR(si, text) \ 4461 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 4462 { \ 4463 return show_stat(s, buf, si); \ 4464 } \ 4465 static ssize_t text##_store(struct kmem_cache *s, \ 4466 const char *buf, size_t length) \ 4467 { \ 4468 if (buf[0] != '0') \ 4469 return -EINVAL; \ 4470 clear_stat(s, si); \ 4471 return length; \ 4472 } \ 4473 SLAB_ATTR(text); \ 4474 4475 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 4476 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 4477 STAT_ATTR(FREE_FASTPATH, free_fastpath); 4478 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 4479 STAT_ATTR(FREE_FROZEN, free_frozen); 4480 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 4481 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 4482 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 4483 STAT_ATTR(ALLOC_SLAB, alloc_slab); 4484 STAT_ATTR(ALLOC_REFILL, alloc_refill); 4485 STAT_ATTR(FREE_SLAB, free_slab); 4486 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 4487 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 4488 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 4489 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 4490 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 4491 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 4492 STAT_ATTR(ORDER_FALLBACK, order_fallback); 4493 #endif 4494 4495 static struct attribute *slab_attrs[] = { 4496 &slab_size_attr.attr, 4497 &object_size_attr.attr, 4498 &objs_per_slab_attr.attr, 4499 &order_attr.attr, 4500 &min_partial_attr.attr, 4501 &objects_attr.attr, 4502 &objects_partial_attr.attr, 4503 &partial_attr.attr, 4504 &cpu_slabs_attr.attr, 4505 &ctor_attr.attr, 4506 &aliases_attr.attr, 4507 &align_attr.attr, 4508 &hwcache_align_attr.attr, 4509 &reclaim_account_attr.attr, 4510 &destroy_by_rcu_attr.attr, 4511 &shrink_attr.attr, 4512 &reserved_attr.attr, 4513 #ifdef CONFIG_SLUB_DEBUG 4514 &total_objects_attr.attr, 4515 &slabs_attr.attr, 4516 &sanity_checks_attr.attr, 4517 &trace_attr.attr, 4518 &red_zone_attr.attr, 4519 &poison_attr.attr, 4520 &store_user_attr.attr, 4521 &validate_attr.attr, 4522 &alloc_calls_attr.attr, 4523 &free_calls_attr.attr, 4524 #endif 4525 #ifdef CONFIG_ZONE_DMA 4526 &cache_dma_attr.attr, 4527 #endif 4528 #ifdef CONFIG_NUMA 4529 &remote_node_defrag_ratio_attr.attr, 4530 #endif 4531 #ifdef CONFIG_SLUB_STATS 4532 &alloc_fastpath_attr.attr, 4533 &alloc_slowpath_attr.attr, 4534 &free_fastpath_attr.attr, 4535 &free_slowpath_attr.attr, 4536 &free_frozen_attr.attr, 4537 &free_add_partial_attr.attr, 4538 &free_remove_partial_attr.attr, 4539 &alloc_from_partial_attr.attr, 4540 &alloc_slab_attr.attr, 
4541 &alloc_refill_attr.attr, 4542 &free_slab_attr.attr, 4543 &cpuslab_flush_attr.attr, 4544 &deactivate_full_attr.attr, 4545 &deactivate_empty_attr.attr, 4546 &deactivate_to_head_attr.attr, 4547 &deactivate_to_tail_attr.attr, 4548 &deactivate_remote_frees_attr.attr, 4549 &order_fallback_attr.attr, 4550 #endif 4551 #ifdef CONFIG_FAILSLAB 4552 &failslab_attr.attr, 4553 #endif 4554 4555 NULL 4556 }; 4557 4558 static struct attribute_group slab_attr_group = { 4559 .attrs = slab_attrs, 4560 }; 4561 4562 static ssize_t slab_attr_show(struct kobject *kobj, 4563 struct attribute *attr, 4564 char *buf) 4565 { 4566 struct slab_attribute *attribute; 4567 struct kmem_cache *s; 4568 int err; 4569 4570 attribute = to_slab_attr(attr); 4571 s = to_slab(kobj); 4572 4573 if (!attribute->show) 4574 return -EIO; 4575 4576 err = attribute->show(s, buf); 4577 4578 return err; 4579 } 4580 4581 static ssize_t slab_attr_store(struct kobject *kobj, 4582 struct attribute *attr, 4583 const char *buf, size_t len) 4584 { 4585 struct slab_attribute *attribute; 4586 struct kmem_cache *s; 4587 int err; 4588 4589 attribute = to_slab_attr(attr); 4590 s = to_slab(kobj); 4591 4592 if (!attribute->store) 4593 return -EIO; 4594 4595 err = attribute->store(s, buf, len); 4596 4597 return err; 4598 } 4599 4600 static void kmem_cache_release(struct kobject *kobj) 4601 { 4602 struct kmem_cache *s = to_slab(kobj); 4603 4604 kfree(s->name); 4605 kfree(s); 4606 } 4607 4608 static const struct sysfs_ops slab_sysfs_ops = { 4609 .show = slab_attr_show, 4610 .store = slab_attr_store, 4611 }; 4612 4613 static struct kobj_type slab_ktype = { 4614 .sysfs_ops = &slab_sysfs_ops, 4615 .release = kmem_cache_release 4616 }; 4617 4618 static int uevent_filter(struct kset *kset, struct kobject *kobj) 4619 { 4620 struct kobj_type *ktype = get_ktype(kobj); 4621 4622 if (ktype == &slab_ktype) 4623 return 1; 4624 return 0; 4625 } 4626 4627 static const struct kset_uevent_ops slab_uevent_ops = { 4628 .filter = uevent_filter, 4629 }; 4630 4631 static struct kset *slab_kset; 4632 4633 #define ID_STR_LENGTH 64 4634 4635 /* Create a unique string id for a slab cache: 4636 * 4637 * Format :[flags-]size 4638 */ 4639 static char *create_unique_id(struct kmem_cache *s) 4640 { 4641 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 4642 char *p = name; 4643 4644 BUG_ON(!name); 4645 4646 *p++ = ':'; 4647 /* 4648 * First flags affecting slabcache operations. We will only 4649 * get here for aliasable slabs so we do not need to support 4650 * too many flags. The flags here must cover all flags that 4651 * are matched during merging to guarantee that the id is 4652 * unique. 4653 */ 4654 if (s->flags & SLAB_CACHE_DMA) 4655 *p++ = 'd'; 4656 if (s->flags & SLAB_RECLAIM_ACCOUNT) 4657 *p++ = 'a'; 4658 if (s->flags & SLAB_DEBUG_FREE) 4659 *p++ = 'F'; 4660 if (!(s->flags & SLAB_NOTRACK)) 4661 *p++ = 't'; 4662 if (p != name + 1) 4663 *p++ = '-'; 4664 p += sprintf(p, "%07d", s->size); 4665 BUG_ON(p > name + ID_STR_LENGTH - 1); 4666 return name; 4667 } 4668 4669 static int sysfs_slab_add(struct kmem_cache *s) 4670 { 4671 int err; 4672 const char *name; 4673 int unmergeable; 4674 4675 if (slab_state < SYSFS) 4676 /* Defer until later */ 4677 return 0; 4678 4679 unmergeable = slab_unmergeable(s); 4680 if (unmergeable) { 4681 /* 4682 * Slabcache can never be merged so we can use the name proper. 4683 * This is typically the case for debug situations. In that 4684 * case we can catch duplicate names easily. 
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err) {
		kobject_del(&s->kobj);
		kobject_put(&s->kobj);
		return err;
	}
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{
	if (slab_state < SYSFS)
		/*
		 * Sysfs has not been set up yet so no need to remove the
		 * cache from sysfs.
		 */
		return;

	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	down_write(&slub_lock);

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		up_write(&slub_lock);
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
						" %s to sysfs\n", al->name);
		kfree(al);
	}

	up_write(&slub_lock);
	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif /* CONFIG_SYSFS */

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);
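	/*
	 * The header is only emitted when the traversal starts at offset
	 * zero, i.e. at the top of a read of /proc/slabinfo; subsequent
	 * seq_file chunks continue with per-cache lines only.
	 */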

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	nr_inuse = nr_objs - nr_free;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, oo_objects(s->oo),
		   (1 << oo_order(s->oo)));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */
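/*
 * Illustrative userspace sketch (assumption: built separately against a
 * libc, not part of this file or the kernel build): parsing the
 * /proc/slabinfo format emitted by print_slabinfo_header() and s_show()
 * above. Lines beginning with '#' or "slabinfo" carry the header; every
 * other line starts with the cache name followed by active_objs, num_objs,
 * objsize, objperslab and pagesperslab.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char line[256], name[64];
 *		unsigned long active, num;
 *		unsigned int objsize, objperslab;
 *		int pagesperslab;
 *		FILE *f = fopen("/proc/slabinfo", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f)) {
 *			if (line[0] == '#' || !strncmp(line, "slabinfo", 8))
 *				continue;	// skip the two header lines
 *			if (sscanf(line, "%63s %lu %lu %u %u %d", name,
 *				   &active, &num, &objsize, &objperslab,
 *				   &pagesperslab) == 6)
 *				printf("%s: %lu/%lu objects of %u bytes\n",
 *				       name, active, num, objsize);
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */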