1 /* 2 * SLUB: A slab allocator that limits cache line use instead of queuing 3 * objects in per cpu and per node lists. 4 * 5 * The allocator synchronizes using per slab locks and only 6 * uses a centralized lock to manage a pool of partial slabs. 7 * 8 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com> 9 */ 10 11 #include <linux/mm.h> 12 #include <linux/module.h> 13 #include <linux/bit_spinlock.h> 14 #include <linux/interrupt.h> 15 #include <linux/bitops.h> 16 #include <linux/slab.h> 17 #include <linux/seq_file.h> 18 #include <linux/cpu.h> 19 #include <linux/cpuset.h> 20 #include <linux/mempolicy.h> 21 #include <linux/ctype.h> 22 #include <linux/kallsyms.h> 23 24 /* 25 * Lock order: 26 * 1. slab_lock(page) 27 * 2. slab->list_lock 28 * 29 * The slab_lock protects operations on the object of a particular 30 * slab and its metadata in the page struct. If the slab lock 31 * has been taken then no allocations nor frees can be performed 32 * on the objects in the slab nor can the slab be added or removed 33 * from the partial or full lists since this would mean modifying 34 * the page_struct of the slab. 35 * 36 * The list_lock protects the partial and full list on each node and 37 * the partial slab counter. If taken then no new slabs may be added or 38 * removed from the lists nor make the number of partial slabs be modified. 39 * (Note that the total number of slabs is an atomic value that may be 40 * modified without taking the list lock). 41 * 42 * The list_lock is a centralized lock and thus we avoid taking it as 43 * much as possible. As long as SLUB does not have to handle partial 44 * slabs, operations can continue without any centralized lock. F.e. 45 * allocating a long series of objects that fill up slabs does not require 46 * the list lock. 47 * 48 * The lock order is sometimes inverted when we are trying to get a slab 49 * off a list. We take the list_lock and then look for a page on the list 50 * to use. While we do that objects in the slabs may be freed. We can 51 * only operate on the slab if we have also taken the slab_lock. So we use 52 * a slab_trylock() on the slab. If trylock was successful then no frees 53 * can occur anymore and we can use the slab for allocations etc. If the 54 * slab_trylock() does not succeed then frees are in progress in the slab and 55 * we must stay away from it for a while since we may cause a bouncing 56 * cacheline if we try to acquire the lock. So go onto the next slab. 57 * If all pages are busy then we may allocate a new slab instead of reusing 58 * a partial slab. A new slab has noone operating on it and thus there is 59 * no danger of cacheline contention. 60 * 61 * Interrupts are disabled during allocation and deallocation in order to 62 * make the slab allocator safe to use in the context of an irq. In addition 63 * interrupts are disabled to ensure that the processor does not change 64 * while handling per_cpu slabs, due to kernel preemption. 65 * 66 * SLUB assigns one slab for allocation to each processor. 67 * Allocations only occur from these slabs called cpu slabs. 68 * 69 * Slabs with free elements are kept on a partial list and during regular 70 * operations no list for full slabs is used. If an object in a full slab is 71 * freed then the slab will show up again on the partial lists. 72 * We track full slabs for debugging purposes though because otherwise we 73 * cannot scan all objects. 74 * 75 * Slabs are freed when they become empty. 
Teardown and setup is 76 * minimal so we rely on the page allocators per cpu caches for 77 * fast frees and allocs. 78 * 79 * Overloading of page flags that are otherwise used for LRU management. 80 * 81 * PageActive The slab is frozen and exempt from list processing. 82 * This means that the slab is dedicated to a purpose 83 * such as satisfying allocations for a specific 84 * processor. Objects may be freed in the slab while 85 * it is frozen but slab_free will then skip the usual 86 * list operations. It is up to the processor holding 87 * the slab to integrate the slab into the slab lists 88 * when the slab is no longer needed. 89 * 90 * One use of this flag is to mark slabs that are 91 * used for allocations. Then such a slab becomes a cpu 92 * slab. The cpu slab may be equipped with an additional 93 * lockless_freelist that allows lockless access to 94 * free objects in addition to the regular freelist 95 * that requires the slab lock. 96 * 97 * PageError Slab requires special handling due to debug 98 * options set. This moves slab handling out of 99 * the fast path and disables lockless freelists. 100 */ 101 102 #define FROZEN (1 << PG_active) 103 104 #ifdef CONFIG_SLUB_DEBUG 105 #define SLABDEBUG (1 << PG_error) 106 #else 107 #define SLABDEBUG 0 108 #endif 109 110 static inline int SlabFrozen(struct page *page) 111 { 112 return page->flags & FROZEN; 113 } 114 115 static inline void SetSlabFrozen(struct page *page) 116 { 117 page->flags |= FROZEN; 118 } 119 120 static inline void ClearSlabFrozen(struct page *page) 121 { 122 page->flags &= ~FROZEN; 123 } 124 125 static inline int SlabDebug(struct page *page) 126 { 127 return page->flags & SLABDEBUG; 128 } 129 130 static inline void SetSlabDebug(struct page *page) 131 { 132 page->flags |= SLABDEBUG; 133 } 134 135 static inline void ClearSlabDebug(struct page *page) 136 { 137 page->flags &= ~SLABDEBUG; 138 } 139 140 /* 141 * Issues still to be resolved: 142 * 143 * - The per cpu array is updated for each new slab and and is a remote 144 * cacheline for most nodes. This could become a bouncing cacheline given 145 * enough frequent updates. There are 16 pointers in a cacheline, so at 146 * max 16 cpus could compete for the cacheline which may be okay. 147 * 148 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 149 * 150 * - Variable sizing of the per node arrays 151 */ 152 153 /* Enable to test recovery from slab corruption on boot */ 154 #undef SLUB_RESILIENCY_TEST 155 156 #if PAGE_SHIFT <= 12 157 158 /* 159 * Small page size. Make sure that we do not fragment memory 160 */ 161 #define DEFAULT_MAX_ORDER 1 162 #define DEFAULT_MIN_OBJECTS 4 163 164 #else 165 166 /* 167 * Large page machines are customarily able to handle larger 168 * page orders. 169 */ 170 #define DEFAULT_MAX_ORDER 2 171 #define DEFAULT_MIN_OBJECTS 8 172 173 #endif 174 175 /* 176 * Mininum number of partial slabs. These will be left on the partial 177 * lists even if they are empty. kmem_cache_shrink may reclaim them. 178 */ 179 #define MIN_PARTIAL 2 180 181 /* 182 * Maximum number of desirable partial slabs. 183 * The existence of more partial slabs makes kmem_cache_shrink 184 * sort the partial list by the number of objects in the. 
185 */ 186 #define MAX_PARTIAL 10 187 188 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \ 189 SLAB_POISON | SLAB_STORE_USER) 190 191 /* 192 * Set of flags that will prevent slab merging 193 */ 194 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 195 SLAB_TRACE | SLAB_DESTROY_BY_RCU) 196 197 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 198 SLAB_CACHE_DMA) 199 200 #ifndef ARCH_KMALLOC_MINALIGN 201 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 202 #endif 203 204 #ifndef ARCH_SLAB_MINALIGN 205 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 206 #endif 207 208 /* Internal SLUB flags */ 209 #define __OBJECT_POISON 0x80000000 /* Poison object */ 210 211 /* Not all arches define cache_line_size */ 212 #ifndef cache_line_size 213 #define cache_line_size() L1_CACHE_BYTES 214 #endif 215 216 static int kmem_size = sizeof(struct kmem_cache); 217 218 #ifdef CONFIG_SMP 219 static struct notifier_block slab_notifier; 220 #endif 221 222 static enum { 223 DOWN, /* No slab functionality available */ 224 PARTIAL, /* kmem_cache_open() works but kmalloc does not */ 225 UP, /* Everything works but does not show up in sysfs */ 226 SYSFS /* Sysfs up */ 227 } slab_state = DOWN; 228 229 /* A list of all slab caches on the system */ 230 static DECLARE_RWSEM(slub_lock); 231 LIST_HEAD(slab_caches); 232 233 /* 234 * Tracking user of a slab. 235 */ 236 struct track { 237 void *addr; /* Called from address */ 238 int cpu; /* Was running on cpu */ 239 int pid; /* Pid context */ 240 unsigned long when; /* When did the operation occur */ 241 }; 242 243 enum track_item { TRACK_ALLOC, TRACK_FREE }; 244 245 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) 246 static int sysfs_slab_add(struct kmem_cache *); 247 static int sysfs_slab_alias(struct kmem_cache *, const char *); 248 static void sysfs_slab_remove(struct kmem_cache *); 249 #else 250 static int sysfs_slab_add(struct kmem_cache *s) { return 0; } 251 static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } 252 static void sysfs_slab_remove(struct kmem_cache *s) {} 253 #endif 254 255 /******************************************************************** 256 * Core slab cache functions 257 *******************************************************************/ 258 259 int slab_is_available(void) 260 { 261 return slab_state >= UP; 262 } 263 264 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 265 { 266 #ifdef CONFIG_NUMA 267 return s->node[node]; 268 #else 269 return &s->local_node; 270 #endif 271 } 272 273 static inline int check_valid_pointer(struct kmem_cache *s, 274 struct page *page, const void *object) 275 { 276 void *base; 277 278 if (!object) 279 return 1; 280 281 base = page_address(page); 282 if (object < base || object >= base + s->objects * s->size || 283 (object - base) % s->size) { 284 return 0; 285 } 286 287 return 1; 288 } 289 290 /* 291 * Slow version of get and set free pointer. 292 * 293 * This version requires touching the cache lines of kmem_cache which 294 * we avoid to do in the fast alloc free paths. There we obtain the offset 295 * from the page struct. 
296 */ 297 static inline void *get_freepointer(struct kmem_cache *s, void *object) 298 { 299 return *(void **)(object + s->offset); 300 } 301 302 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 303 { 304 *(void **)(object + s->offset) = fp; 305 } 306 307 /* Loop over all objects in a slab */ 308 #define for_each_object(__p, __s, __addr) \ 309 for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\ 310 __p += (__s)->size) 311 312 /* Scan freelist */ 313 #define for_each_free_object(__p, __s, __free) \ 314 for (__p = (__free); __p; __p = get_freepointer((__s), __p)) 315 316 /* Determine object index from a given position */ 317 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 318 { 319 return (p - addr) / s->size; 320 } 321 322 #ifdef CONFIG_SLUB_DEBUG 323 /* 324 * Debug settings: 325 */ 326 static int slub_debug; 327 328 static char *slub_debug_slabs; 329 330 /* 331 * Object debugging 332 */ 333 static void print_section(char *text, u8 *addr, unsigned int length) 334 { 335 int i, offset; 336 int newline = 1; 337 char ascii[17]; 338 339 ascii[16] = 0; 340 341 for (i = 0; i < length; i++) { 342 if (newline) { 343 printk(KERN_ERR "%10s 0x%p: ", text, addr + i); 344 newline = 0; 345 } 346 printk(" %02x", addr[i]); 347 offset = i % 16; 348 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.'; 349 if (offset == 15) { 350 printk(" %s\n",ascii); 351 newline = 1; 352 } 353 } 354 if (!newline) { 355 i %= 16; 356 while (i < 16) { 357 printk(" "); 358 ascii[i] = ' '; 359 i++; 360 } 361 printk(" %s\n", ascii); 362 } 363 } 364 365 static struct track *get_track(struct kmem_cache *s, void *object, 366 enum track_item alloc) 367 { 368 struct track *p; 369 370 if (s->offset) 371 p = object + s->offset + sizeof(void *); 372 else 373 p = object + s->inuse; 374 375 return p + alloc; 376 } 377 378 static void set_track(struct kmem_cache *s, void *object, 379 enum track_item alloc, void *addr) 380 { 381 struct track *p; 382 383 if (s->offset) 384 p = object + s->offset + sizeof(void *); 385 else 386 p = object + s->inuse; 387 388 p += alloc; 389 if (addr) { 390 p->addr = addr; 391 p->cpu = smp_processor_id(); 392 p->pid = current ? 
current->pid : -1; 393 p->when = jiffies; 394 } else 395 memset(p, 0, sizeof(struct track)); 396 } 397 398 static void init_tracking(struct kmem_cache *s, void *object) 399 { 400 if (s->flags & SLAB_STORE_USER) { 401 set_track(s, object, TRACK_FREE, NULL); 402 set_track(s, object, TRACK_ALLOC, NULL); 403 } 404 } 405 406 static void print_track(const char *s, struct track *t) 407 { 408 if (!t->addr) 409 return; 410 411 printk(KERN_ERR "%s: ", s); 412 __print_symbol("%s", (unsigned long)t->addr); 413 printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid); 414 } 415 416 static void print_trailer(struct kmem_cache *s, u8 *p) 417 { 418 unsigned int off; /* Offset of last byte */ 419 420 if (s->flags & SLAB_RED_ZONE) 421 print_section("Redzone", p + s->objsize, 422 s->inuse - s->objsize); 423 424 printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n", 425 p + s->offset, 426 get_freepointer(s, p)); 427 428 if (s->offset) 429 off = s->offset + sizeof(void *); 430 else 431 off = s->inuse; 432 433 if (s->flags & SLAB_STORE_USER) { 434 print_track("Last alloc", get_track(s, p, TRACK_ALLOC)); 435 print_track("Last free ", get_track(s, p, TRACK_FREE)); 436 off += 2 * sizeof(struct track); 437 } 438 439 if (off != s->size) 440 /* Beginning of the filler is the free pointer */ 441 print_section("Filler", p + off, s->size - off); 442 } 443 444 static void object_err(struct kmem_cache *s, struct page *page, 445 u8 *object, char *reason) 446 { 447 u8 *addr = page_address(page); 448 449 printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n", 450 s->name, reason, object, page); 451 printk(KERN_ERR " offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n", 452 object - addr, page->flags, page->inuse, page->freelist); 453 if (object > addr + 16) 454 print_section("Bytes b4", object - 16, 16); 455 print_section("Object", object, min(s->objsize, 128)); 456 print_trailer(s, object); 457 dump_stack(); 458 } 459 460 static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...) 461 { 462 va_list args; 463 char buf[100]; 464 465 va_start(args, reason); 466 vsnprintf(buf, sizeof(buf), reason, args); 467 va_end(args); 468 printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf, 469 page); 470 dump_stack(); 471 } 472 473 static void init_object(struct kmem_cache *s, void *object, int active) 474 { 475 u8 *p = object; 476 477 if (s->flags & __OBJECT_POISON) { 478 memset(p, POISON_FREE, s->objsize - 1); 479 p[s->objsize -1] = POISON_END; 480 } 481 482 if (s->flags & SLAB_RED_ZONE) 483 memset(p + s->objsize, 484 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, 485 s->inuse - s->objsize); 486 } 487 488 static int check_bytes(u8 *start, unsigned int value, unsigned int bytes) 489 { 490 while (bytes) { 491 if (*start != (u8)value) 492 return 0; 493 start++; 494 bytes--; 495 } 496 return 1; 497 } 498 499 /* 500 * Object layout: 501 * 502 * object address 503 * Bytes of the object to be managed. 504 * If the freepointer may overlay the object then the free 505 * pointer is the first word of the object. 506 * 507 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 508 * 0xa5 (POISON_END) 509 * 510 * object + s->objsize 511 * Padding to reach word boundary. This is also used for Redzoning. 512 * Padding is extended by another word if Redzoning is enabled and 513 * objsize == inuse. 514 * 515 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 516 * 0xcc (RED_ACTIVE) for objects in use. 517 * 518 * object + s->inuse 519 * Meta data starts here. 520 * 521 * A. 
Free pointer (if we cannot overwrite object on free) 522 * B. Tracking data for SLAB_STORE_USER 523 * C. Padding to reach required alignment boundary or at mininum 524 * one word if debuggin is on to be able to detect writes 525 * before the word boundary. 526 * 527 * Padding is done using 0x5a (POISON_INUSE) 528 * 529 * object + s->size 530 * Nothing is used beyond s->size. 531 * 532 * If slabcaches are merged then the objsize and inuse boundaries are mostly 533 * ignored. And therefore no slab options that rely on these boundaries 534 * may be used with merged slabcaches. 535 */ 536 537 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 538 void *from, void *to) 539 { 540 printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n", 541 s->name, message, data, from, to - 1); 542 memset(from, data, to - from); 543 } 544 545 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 546 { 547 unsigned long off = s->inuse; /* The end of info */ 548 549 if (s->offset) 550 /* Freepointer is placed after the object. */ 551 off += sizeof(void *); 552 553 if (s->flags & SLAB_STORE_USER) 554 /* We also have user information there */ 555 off += 2 * sizeof(struct track); 556 557 if (s->size == off) 558 return 1; 559 560 if (check_bytes(p + off, POISON_INUSE, s->size - off)) 561 return 1; 562 563 object_err(s, page, p, "Object padding check fails"); 564 565 /* 566 * Restore padding 567 */ 568 restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size); 569 return 0; 570 } 571 572 static int slab_pad_check(struct kmem_cache *s, struct page *page) 573 { 574 u8 *p; 575 int length, remainder; 576 577 if (!(s->flags & SLAB_POISON)) 578 return 1; 579 580 p = page_address(page); 581 length = s->objects * s->size; 582 remainder = (PAGE_SIZE << s->order) - length; 583 if (!remainder) 584 return 1; 585 586 if (!check_bytes(p + length, POISON_INUSE, remainder)) { 587 slab_err(s, page, "Padding check failed"); 588 restore_bytes(s, "slab padding", POISON_INUSE, p + length, 589 p + length + remainder); 590 return 0; 591 } 592 return 1; 593 } 594 595 static int check_object(struct kmem_cache *s, struct page *page, 596 void *object, int active) 597 { 598 u8 *p = object; 599 u8 *endobject = object + s->objsize; 600 601 if (s->flags & SLAB_RED_ZONE) { 602 unsigned int red = 603 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE; 604 605 if (!check_bytes(endobject, red, s->inuse - s->objsize)) { 606 object_err(s, page, object, 607 active ? "Redzone Active" : "Redzone Inactive"); 608 restore_bytes(s, "redzone", red, 609 endobject, object + s->inuse); 610 return 0; 611 } 612 } else { 613 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse && 614 !check_bytes(endobject, POISON_INUSE, 615 s->inuse - s->objsize)) { 616 object_err(s, page, p, "Alignment padding check fails"); 617 /* 618 * Fix it so that there will not be another report. 619 * 620 * Hmmm... We may be corrupting an object that now expects 621 * to be longer than allowed. 
622 */ 623 restore_bytes(s, "alignment padding", POISON_INUSE, 624 endobject, object + s->inuse); 625 } 626 } 627 628 if (s->flags & SLAB_POISON) { 629 if (!active && (s->flags & __OBJECT_POISON) && 630 (!check_bytes(p, POISON_FREE, s->objsize - 1) || 631 p[s->objsize - 1] != POISON_END)) { 632 633 object_err(s, page, p, "Poison check failed"); 634 restore_bytes(s, "Poison", POISON_FREE, 635 p, p + s->objsize -1); 636 restore_bytes(s, "Poison", POISON_END, 637 p + s->objsize - 1, p + s->objsize); 638 return 0; 639 } 640 /* 641 * check_pad_bytes cleans up on its own. 642 */ 643 check_pad_bytes(s, page, p); 644 } 645 646 if (!s->offset && active) 647 /* 648 * Object and freepointer overlap. Cannot check 649 * freepointer while object is allocated. 650 */ 651 return 1; 652 653 /* Check free pointer validity */ 654 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 655 object_err(s, page, p, "Freepointer corrupt"); 656 /* 657 * No choice but to zap it and thus loose the remainder 658 * of the free objects in this slab. May cause 659 * another error because the object count is now wrong. 660 */ 661 set_freepointer(s, p, NULL); 662 return 0; 663 } 664 return 1; 665 } 666 667 static int check_slab(struct kmem_cache *s, struct page *page) 668 { 669 VM_BUG_ON(!irqs_disabled()); 670 671 if (!PageSlab(page)) { 672 slab_err(s, page, "Not a valid slab page flags=%lx " 673 "mapping=0x%p count=%d", page->flags, page->mapping, 674 page_count(page)); 675 return 0; 676 } 677 if (page->offset * sizeof(void *) != s->offset) { 678 slab_err(s, page, "Corrupted offset %lu flags=0x%lx " 679 "mapping=0x%p count=%d", 680 (unsigned long)(page->offset * sizeof(void *)), 681 page->flags, 682 page->mapping, 683 page_count(page)); 684 return 0; 685 } 686 if (page->inuse > s->objects) { 687 slab_err(s, page, "inuse %u > max %u @0x%p flags=%lx " 688 "mapping=0x%p count=%d", 689 s->name, page->inuse, s->objects, page->flags, 690 page->mapping, page_count(page)); 691 return 0; 692 } 693 /* Slab_pad_check fixes things up after itself */ 694 slab_pad_check(s, page); 695 return 1; 696 } 697 698 /* 699 * Determine if a certain object on a page is on the freelist. Must hold the 700 * slab lock to guarantee that the chains are in a consistent state. 701 */ 702 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 703 { 704 int nr = 0; 705 void *fp = page->freelist; 706 void *object = NULL; 707 708 while (fp && nr <= s->objects) { 709 if (fp == search) 710 return 1; 711 if (!check_valid_pointer(s, page, fp)) { 712 if (object) { 713 object_err(s, page, object, 714 "Freechain corrupt"); 715 set_freepointer(s, object, NULL); 716 break; 717 } else { 718 slab_err(s, page, "Freepointer 0x%p corrupt", 719 fp); 720 page->freelist = NULL; 721 page->inuse = s->objects; 722 printk(KERN_ERR "@@@ SLUB %s: Freelist " 723 "cleared. Slab 0x%p\n", 724 s->name, page); 725 return 0; 726 } 727 break; 728 } 729 object = fp; 730 fp = get_freepointer(s, object); 731 nr++; 732 } 733 734 if (page->inuse != s->objects - nr) { 735 slab_err(s, page, "Wrong object count. Counter is %d but " 736 "counted were %d", s, page, page->inuse, 737 s->objects - nr); 738 page->inuse = s->objects - nr; 739 printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. 
" 740 "Slab @0x%p\n", s->name, page); 741 } 742 return search == NULL; 743 } 744 745 static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) 746 { 747 if (s->flags & SLAB_TRACE) { 748 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 749 s->name, 750 alloc ? "alloc" : "free", 751 object, page->inuse, 752 page->freelist); 753 754 if (!alloc) 755 print_section("Object", (void *)object, s->objsize); 756 757 dump_stack(); 758 } 759 } 760 761 /* 762 * Tracking of fully allocated slabs for debugging purposes. 763 */ 764 static void add_full(struct kmem_cache_node *n, struct page *page) 765 { 766 spin_lock(&n->list_lock); 767 list_add(&page->lru, &n->full); 768 spin_unlock(&n->list_lock); 769 } 770 771 static void remove_full(struct kmem_cache *s, struct page *page) 772 { 773 struct kmem_cache_node *n; 774 775 if (!(s->flags & SLAB_STORE_USER)) 776 return; 777 778 n = get_node(s, page_to_nid(page)); 779 780 spin_lock(&n->list_lock); 781 list_del(&page->lru); 782 spin_unlock(&n->list_lock); 783 } 784 785 static void setup_object_debug(struct kmem_cache *s, struct page *page, 786 void *object) 787 { 788 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 789 return; 790 791 init_object(s, object, 0); 792 init_tracking(s, object); 793 } 794 795 static int alloc_debug_processing(struct kmem_cache *s, struct page *page, 796 void *object, void *addr) 797 { 798 if (!check_slab(s, page)) 799 goto bad; 800 801 if (object && !on_freelist(s, page, object)) { 802 slab_err(s, page, "Object 0x%p already allocated", object); 803 goto bad; 804 } 805 806 if (!check_valid_pointer(s, page, object)) { 807 object_err(s, page, object, "Freelist Pointer check fails"); 808 goto bad; 809 } 810 811 if (object && !check_object(s, page, object, 0)) 812 goto bad; 813 814 /* Success perform special debug activities for allocs */ 815 if (s->flags & SLAB_STORE_USER) 816 set_track(s, object, TRACK_ALLOC, addr); 817 trace(s, page, object, 1); 818 init_object(s, object, 1); 819 return 1; 820 821 bad: 822 if (PageSlab(page)) { 823 /* 824 * If this is a slab page then lets do the best we can 825 * to avoid issues in the future. Marking all objects 826 * as used avoids touching the remaining objects. 827 */ 828 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. 
Marking all objects used.\n", 829 s->name, page); 830 page->inuse = s->objects; 831 page->freelist = NULL; 832 /* Fix up fields that may be corrupted */ 833 page->offset = s->offset / sizeof(void *); 834 } 835 return 0; 836 } 837 838 static int free_debug_processing(struct kmem_cache *s, struct page *page, 839 void *object, void *addr) 840 { 841 if (!check_slab(s, page)) 842 goto fail; 843 844 if (!check_valid_pointer(s, page, object)) { 845 slab_err(s, page, "Invalid object pointer 0x%p", object); 846 goto fail; 847 } 848 849 if (on_freelist(s, page, object)) { 850 slab_err(s, page, "Object 0x%p already free", object); 851 goto fail; 852 } 853 854 if (!check_object(s, page, object, 1)) 855 return 0; 856 857 if (unlikely(s != page->slab)) { 858 if (!PageSlab(page)) 859 slab_err(s, page, "Attempt to free object(0x%p) " 860 "outside of slab", object); 861 else 862 if (!page->slab) { 863 printk(KERN_ERR 864 "SLUB <none>: no slab for object 0x%p.\n", 865 object); 866 dump_stack(); 867 } 868 else 869 slab_err(s, page, "object at 0x%p belongs " 870 "to slab %s", object, page->slab->name); 871 goto fail; 872 } 873 874 /* Special debug activities for freeing objects */ 875 if (!SlabFrozen(page) && !page->freelist) 876 remove_full(s, page); 877 if (s->flags & SLAB_STORE_USER) 878 set_track(s, object, TRACK_FREE, addr); 879 trace(s, page, object, 0); 880 init_object(s, object, 0); 881 return 1; 882 883 fail: 884 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", 885 s->name, page, object); 886 return 0; 887 } 888 889 static int __init setup_slub_debug(char *str) 890 { 891 if (!str || *str != '=') 892 slub_debug = DEBUG_DEFAULT_FLAGS; 893 else { 894 str++; 895 if (*str == 0 || *str == ',') 896 slub_debug = DEBUG_DEFAULT_FLAGS; 897 else 898 for( ;*str && *str != ','; str++) 899 switch (*str) { 900 case 'f' : case 'F' : 901 slub_debug |= SLAB_DEBUG_FREE; 902 break; 903 case 'z' : case 'Z' : 904 slub_debug |= SLAB_RED_ZONE; 905 break; 906 case 'p' : case 'P' : 907 slub_debug |= SLAB_POISON; 908 break; 909 case 'u' : case 'U' : 910 slub_debug |= SLAB_STORE_USER; 911 break; 912 case 't' : case 'T' : 913 slub_debug |= SLAB_TRACE; 914 break; 915 default: 916 printk(KERN_ERR "slub_debug option '%c' " 917 "unknown. skipped\n",*str); 918 } 919 } 920 921 if (*str == ',') 922 slub_debug_slabs = str + 1; 923 return 1; 924 } 925 926 __setup("slub_debug", setup_slub_debug); 927 928 static void kmem_cache_open_debug_check(struct kmem_cache *s) 929 { 930 /* 931 * The page->offset field is only 16 bit wide. This is an offset 932 * in units of words from the beginning of an object. If the slab 933 * size is bigger then we cannot move the free pointer behind the 934 * object anymore. 935 * 936 * On 32 bit platforms the limit is 256k. On 64bit platforms 937 * the limit is 512k. 938 * 939 * Debugging or ctor may create a need to move the free 940 * pointer. Fail if this happens. 941 */ 942 if (s->objsize >= 65535 * sizeof(void *)) { 943 BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON | 944 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); 945 BUG_ON(s->ctor); 946 } 947 else 948 /* 949 * Enable debugging if selected on the kernel commandline. 
950 */ 951 if (slub_debug && (!slub_debug_slabs || 952 strncmp(slub_debug_slabs, s->name, 953 strlen(slub_debug_slabs)) == 0)) 954 s->flags |= slub_debug; 955 } 956 #else 957 static inline void setup_object_debug(struct kmem_cache *s, 958 struct page *page, void *object) {} 959 960 static inline int alloc_debug_processing(struct kmem_cache *s, 961 struct page *page, void *object, void *addr) { return 0; } 962 963 static inline int free_debug_processing(struct kmem_cache *s, 964 struct page *page, void *object, void *addr) { return 0; } 965 966 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 967 { return 1; } 968 static inline int check_object(struct kmem_cache *s, struct page *page, 969 void *object, int active) { return 1; } 970 static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 971 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {} 972 #define slub_debug 0 973 #endif 974 /* 975 * Slab allocation and freeing 976 */ 977 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 978 { 979 struct page * page; 980 int pages = 1 << s->order; 981 982 if (s->order) 983 flags |= __GFP_COMP; 984 985 if (s->flags & SLAB_CACHE_DMA) 986 flags |= SLUB_DMA; 987 988 if (node == -1) 989 page = alloc_pages(flags, s->order); 990 else 991 page = alloc_pages_node(node, flags, s->order); 992 993 if (!page) 994 return NULL; 995 996 mod_zone_page_state(page_zone(page), 997 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 998 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 999 pages); 1000 1001 return page; 1002 } 1003 1004 static void setup_object(struct kmem_cache *s, struct page *page, 1005 void *object) 1006 { 1007 setup_object_debug(s, page, object); 1008 if (unlikely(s->ctor)) 1009 s->ctor(object, s, 0); 1010 } 1011 1012 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1013 { 1014 struct page *page; 1015 struct kmem_cache_node *n; 1016 void *start; 1017 void *end; 1018 void *last; 1019 void *p; 1020 1021 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); 1022 1023 if (flags & __GFP_WAIT) 1024 local_irq_enable(); 1025 1026 page = allocate_slab(s, flags & GFP_LEVEL_MASK, node); 1027 if (!page) 1028 goto out; 1029 1030 n = get_node(s, page_to_nid(page)); 1031 if (n) 1032 atomic_long_inc(&n->nr_slabs); 1033 page->offset = s->offset / sizeof(void *); 1034 page->slab = s; 1035 page->flags |= 1 << PG_slab; 1036 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | 1037 SLAB_STORE_USER | SLAB_TRACE)) 1038 SetSlabDebug(page); 1039 1040 start = page_address(page); 1041 end = start + s->objects * s->size; 1042 1043 if (unlikely(s->flags & SLAB_POISON)) 1044 memset(start, POISON_INUSE, PAGE_SIZE << s->order); 1045 1046 last = start; 1047 for_each_object(p, s, start) { 1048 setup_object(s, page, last); 1049 set_freepointer(s, last, p); 1050 last = p; 1051 } 1052 setup_object(s, page, last); 1053 set_freepointer(s, last, NULL); 1054 1055 page->freelist = start; 1056 page->lockless_freelist = NULL; 1057 page->inuse = 0; 1058 out: 1059 if (flags & __GFP_WAIT) 1060 local_irq_disable(); 1061 return page; 1062 } 1063 1064 static void __free_slab(struct kmem_cache *s, struct page *page) 1065 { 1066 int pages = 1 << s->order; 1067 1068 if (unlikely(SlabDebug(page))) { 1069 void *p; 1070 1071 slab_pad_check(s, page); 1072 for_each_object(p, s, page_address(page)) 1073 check_object(s, page, p, 0); 1074 } 1075 1076 mod_zone_page_state(page_zone(page), 1077 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1078 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1079 - pages); 1080 1081 page->mapping = NULL; 1082 __free_pages(page, s->order); 1083 } 1084 1085 static void rcu_free_slab(struct rcu_head *h) 1086 { 1087 struct page *page; 1088 1089 page = container_of((struct list_head *)h, struct page, lru); 1090 __free_slab(page->slab, page); 1091 } 1092 1093 static void free_slab(struct kmem_cache *s, struct page *page) 1094 { 1095 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1096 /* 1097 * RCU free overloads the RCU head over the LRU 1098 */ 1099 struct rcu_head *head = (void *)&page->lru; 1100 1101 call_rcu(head, rcu_free_slab); 1102 } else 1103 __free_slab(s, page); 1104 } 1105 1106 static void discard_slab(struct kmem_cache *s, struct page *page) 1107 { 1108 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1109 1110 atomic_long_dec(&n->nr_slabs); 1111 reset_page_mapcount(page); 1112 ClearSlabDebug(page); 1113 __ClearPageSlab(page); 1114 free_slab(s, page); 1115 } 1116 1117 /* 1118 * Per slab locking using the pagelock 1119 */ 1120 static __always_inline void slab_lock(struct page *page) 1121 { 1122 bit_spin_lock(PG_locked, &page->flags); 1123 } 1124 1125 static __always_inline void slab_unlock(struct page *page) 1126 { 1127 bit_spin_unlock(PG_locked, &page->flags); 1128 } 1129 1130 static __always_inline int slab_trylock(struct page *page) 1131 { 1132 int rc = 1; 1133 1134 rc = bit_spin_trylock(PG_locked, &page->flags); 1135 return rc; 1136 } 1137 1138 /* 1139 * Management of partially allocated slabs 1140 */ 1141 static void add_partial_tail(struct kmem_cache_node *n, struct page *page) 1142 { 1143 spin_lock(&n->list_lock); 1144 n->nr_partial++; 1145 list_add_tail(&page->lru, &n->partial); 1146 spin_unlock(&n->list_lock); 1147 } 1148 1149 static void add_partial(struct kmem_cache_node *n, struct page *page) 1150 { 1151 spin_lock(&n->list_lock); 1152 n->nr_partial++; 1153 list_add(&page->lru, &n->partial); 1154 spin_unlock(&n->list_lock); 1155 } 1156 1157 static void remove_partial(struct kmem_cache *s, 1158 struct page *page) 1159 { 1160 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1161 1162 spin_lock(&n->list_lock); 1163 list_del(&page->lru); 1164 n->nr_partial--; 1165 spin_unlock(&n->list_lock); 1166 } 1167 1168 /* 1169 * Lock slab and remove from the partial list. 1170 * 1171 * Must hold list_lock. 1172 */ 1173 static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page) 1174 { 1175 if (slab_trylock(page)) { 1176 list_del(&page->lru); 1177 n->nr_partial--; 1178 SetSlabFrozen(page); 1179 return 1; 1180 } 1181 return 0; 1182 } 1183 1184 /* 1185 * Try to allocate a partial slab from a specific node. 1186 */ 1187 static struct page *get_partial_node(struct kmem_cache_node *n) 1188 { 1189 struct page *page; 1190 1191 /* 1192 * Racy check. If we mistakenly see no partial slabs then we 1193 * just allocate an empty slab. If we mistakenly try to get a 1194 * partial slab and there is none available then get_partials() 1195 * will return NULL. 1196 */ 1197 if (!n || !n->nr_partial) 1198 return NULL; 1199 1200 spin_lock(&n->list_lock); 1201 list_for_each_entry(page, &n->partial, lru) 1202 if (lock_and_freeze_slab(n, page)) 1203 goto out; 1204 page = NULL; 1205 out: 1206 spin_unlock(&n->list_lock); 1207 return page; 1208 } 1209 1210 /* 1211 * Get a page from somewhere. Search in increasing NUMA distances. 
1212 */ 1213 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) 1214 { 1215 #ifdef CONFIG_NUMA 1216 struct zonelist *zonelist; 1217 struct zone **z; 1218 struct page *page; 1219 1220 /* 1221 * The defrag ratio allows a configuration of the tradeoffs between 1222 * inter node defragmentation and node local allocations. A lower 1223 * defrag_ratio increases the tendency to do local allocations 1224 * instead of attempting to obtain partial slabs from other nodes. 1225 * 1226 * If the defrag_ratio is set to 0 then kmalloc() always 1227 * returns node local objects. If the ratio is higher then kmalloc() 1228 * may return off node objects because partial slabs are obtained 1229 * from other nodes and filled up. 1230 * 1231 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes 1232 * defrag_ratio = 1000) then every (well almost) allocation will 1233 * first attempt to defrag slab caches on other nodes. This means 1234 * scanning over all nodes to look for partial slabs which may be 1235 * expensive if we do it every time we are trying to find a slab 1236 * with available objects. 1237 */ 1238 if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio) 1239 return NULL; 1240 1241 zonelist = &NODE_DATA(slab_node(current->mempolicy)) 1242 ->node_zonelists[gfp_zone(flags)]; 1243 for (z = zonelist->zones; *z; z++) { 1244 struct kmem_cache_node *n; 1245 1246 n = get_node(s, zone_to_nid(*z)); 1247 1248 if (n && cpuset_zone_allowed_hardwall(*z, flags) && 1249 n->nr_partial > MIN_PARTIAL) { 1250 page = get_partial_node(n); 1251 if (page) 1252 return page; 1253 } 1254 } 1255 #endif 1256 return NULL; 1257 } 1258 1259 /* 1260 * Get a partial page, lock it and return it. 1261 */ 1262 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) 1263 { 1264 struct page *page; 1265 int searchnode = (node == -1) ? numa_node_id() : node; 1266 1267 page = get_partial_node(get_node(s, searchnode)); 1268 if (page || (flags & __GFP_THISNODE)) 1269 return page; 1270 1271 return get_any_partial(s, flags); 1272 } 1273 1274 /* 1275 * Move a page back to the lists. 1276 * 1277 * Must be called with the slab lock held. 1278 * 1279 * On exit the slab lock will have been dropped. 1280 */ 1281 static void unfreeze_slab(struct kmem_cache *s, struct page *page) 1282 { 1283 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1284 1285 ClearSlabFrozen(page); 1286 if (page->inuse) { 1287 1288 if (page->freelist) 1289 add_partial(n, page); 1290 else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER)) 1291 add_full(n, page); 1292 slab_unlock(page); 1293 1294 } else { 1295 if (n->nr_partial < MIN_PARTIAL) { 1296 /* 1297 * Adding an empty slab to the partial slabs in order 1298 * to avoid page allocator overhead. This slab needs 1299 * to come after the other slabs with objects in 1300 * order to fill them up. That way the size of the 1301 * partial list stays small. kmem_cache_shrink can 1302 * reclaim empty slabs from the partial list. 1303 */ 1304 add_partial_tail(n, page); 1305 slab_unlock(page); 1306 } else { 1307 slab_unlock(page); 1308 discard_slab(s, page); 1309 } 1310 } 1311 } 1312 1313 /* 1314 * Remove the cpu slab 1315 */ 1316 static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu) 1317 { 1318 /* 1319 * Merge cpu freelist into freelist. Typically we get here 1320 * because both freelists are empty. So this is unlikely 1321 * to occur. 
1322 */ 1323 while (unlikely(page->lockless_freelist)) { 1324 void **object; 1325 1326 /* Retrieve object from cpu_freelist */ 1327 object = page->lockless_freelist; 1328 page->lockless_freelist = page->lockless_freelist[page->offset]; 1329 1330 /* And put onto the regular freelist */ 1331 object[page->offset] = page->freelist; 1332 page->freelist = object; 1333 page->inuse--; 1334 } 1335 s->cpu_slab[cpu] = NULL; 1336 unfreeze_slab(s, page); 1337 } 1338 1339 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu) 1340 { 1341 slab_lock(page); 1342 deactivate_slab(s, page, cpu); 1343 } 1344 1345 /* 1346 * Flush cpu slab. 1347 * Called from IPI handler with interrupts disabled. 1348 */ 1349 static void __flush_cpu_slab(struct kmem_cache *s, int cpu) 1350 { 1351 struct page *page = s->cpu_slab[cpu]; 1352 1353 if (likely(page)) 1354 flush_slab(s, page, cpu); 1355 } 1356 1357 static void flush_cpu_slab(void *d) 1358 { 1359 struct kmem_cache *s = d; 1360 int cpu = smp_processor_id(); 1361 1362 __flush_cpu_slab(s, cpu); 1363 } 1364 1365 static void flush_all(struct kmem_cache *s) 1366 { 1367 #ifdef CONFIG_SMP 1368 on_each_cpu(flush_cpu_slab, s, 1, 1); 1369 #else 1370 unsigned long flags; 1371 1372 local_irq_save(flags); 1373 flush_cpu_slab(s); 1374 local_irq_restore(flags); 1375 #endif 1376 } 1377 1378 /* 1379 * Slow path. The lockless freelist is empty or we need to perform 1380 * debugging duties. 1381 * 1382 * Interrupts are disabled. 1383 * 1384 * Processing is still very fast if new objects have been freed to the 1385 * regular freelist. In that case we simply take over the regular freelist 1386 * as the lockless freelist and zap the regular freelist. 1387 * 1388 * If that is not working then we fall back to the partial lists. We take the 1389 * first element of the freelist as the object to allocate now and move the 1390 * rest of the freelist to the lockless freelist. 1391 * 1392 * And if we were unable to get a new slab from the partial slab lists then 1393 * we need to allocate a new slab. This is slowest path since we may sleep. 1394 */ 1395 static void *__slab_alloc(struct kmem_cache *s, 1396 gfp_t gfpflags, int node, void *addr, struct page *page) 1397 { 1398 void **object; 1399 int cpu = smp_processor_id(); 1400 1401 if (!page) 1402 goto new_slab; 1403 1404 slab_lock(page); 1405 if (unlikely(node != -1 && page_to_nid(page) != node)) 1406 goto another_slab; 1407 load_freelist: 1408 object = page->freelist; 1409 if (unlikely(!object)) 1410 goto another_slab; 1411 if (unlikely(SlabDebug(page))) 1412 goto debug; 1413 1414 object = page->freelist; 1415 page->lockless_freelist = object[page->offset]; 1416 page->inuse = s->objects; 1417 page->freelist = NULL; 1418 slab_unlock(page); 1419 return object; 1420 1421 another_slab: 1422 deactivate_slab(s, page, cpu); 1423 1424 new_slab: 1425 page = get_partial(s, gfpflags, node); 1426 if (page) { 1427 s->cpu_slab[cpu] = page; 1428 goto load_freelist; 1429 } 1430 1431 page = new_slab(s, gfpflags, node); 1432 if (page) { 1433 cpu = smp_processor_id(); 1434 if (s->cpu_slab[cpu]) { 1435 /* 1436 * Someone else populated the cpu_slab while we 1437 * enabled interrupts, or we have gotten scheduled 1438 * on another cpu. The page may not be on the 1439 * requested node even if __GFP_THISNODE was 1440 * specified. So we need to recheck. 
1441 */ 1442 if (node == -1 || 1443 page_to_nid(s->cpu_slab[cpu]) == node) { 1444 /* 1445 * Current cpuslab is acceptable and we 1446 * want the current one since its cache hot 1447 */ 1448 discard_slab(s, page); 1449 page = s->cpu_slab[cpu]; 1450 slab_lock(page); 1451 goto load_freelist; 1452 } 1453 /* New slab does not fit our expectations */ 1454 flush_slab(s, s->cpu_slab[cpu], cpu); 1455 } 1456 slab_lock(page); 1457 SetSlabFrozen(page); 1458 s->cpu_slab[cpu] = page; 1459 goto load_freelist; 1460 } 1461 return NULL; 1462 debug: 1463 object = page->freelist; 1464 if (!alloc_debug_processing(s, page, object, addr)) 1465 goto another_slab; 1466 1467 page->inuse++; 1468 page->freelist = object[page->offset]; 1469 slab_unlock(page); 1470 return object; 1471 } 1472 1473 /* 1474 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 1475 * have the fastpath folded into their functions. So no function call 1476 * overhead for requests that can be satisfied on the fastpath. 1477 * 1478 * The fastpath works by first checking if the lockless freelist can be used. 1479 * If not then __slab_alloc is called for slow processing. 1480 * 1481 * Otherwise we can simply pick the next object from the lockless free list. 1482 */ 1483 static void __always_inline *slab_alloc(struct kmem_cache *s, 1484 gfp_t gfpflags, int node, void *addr) 1485 { 1486 struct page *page; 1487 void **object; 1488 unsigned long flags; 1489 1490 local_irq_save(flags); 1491 page = s->cpu_slab[smp_processor_id()]; 1492 if (unlikely(!page || !page->lockless_freelist || 1493 (node != -1 && page_to_nid(page) != node))) 1494 1495 object = __slab_alloc(s, gfpflags, node, addr, page); 1496 1497 else { 1498 object = page->lockless_freelist; 1499 page->lockless_freelist = object[page->offset]; 1500 } 1501 local_irq_restore(flags); 1502 return object; 1503 } 1504 1505 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 1506 { 1507 return slab_alloc(s, gfpflags, -1, __builtin_return_address(0)); 1508 } 1509 EXPORT_SYMBOL(kmem_cache_alloc); 1510 1511 #ifdef CONFIG_NUMA 1512 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 1513 { 1514 return slab_alloc(s, gfpflags, node, __builtin_return_address(0)); 1515 } 1516 EXPORT_SYMBOL(kmem_cache_alloc_node); 1517 #endif 1518 1519 /* 1520 * Slow patch handling. This may still be called frequently since objects 1521 * have a longer lifetime than the cpu slabs in most processing loads. 1522 * 1523 * So we still attempt to reduce cache line usage. Just take the slab 1524 * lock and free the item. If there is no additional partial page 1525 * handling required then we can return immediately. 1526 */ 1527 static void __slab_free(struct kmem_cache *s, struct page *page, 1528 void *x, void *addr) 1529 { 1530 void *prior; 1531 void **object = (void *)x; 1532 1533 slab_lock(page); 1534 1535 if (unlikely(SlabDebug(page))) 1536 goto debug; 1537 checks_ok: 1538 prior = object[page->offset] = page->freelist; 1539 page->freelist = object; 1540 page->inuse--; 1541 1542 if (unlikely(SlabFrozen(page))) 1543 goto out_unlock; 1544 1545 if (unlikely(!page->inuse)) 1546 goto slab_empty; 1547 1548 /* 1549 * Objects left in the slab. If it 1550 * was not on the partial list before 1551 * then add it. 1552 */ 1553 if (unlikely(!prior)) 1554 add_partial(get_node(s, page_to_nid(page)), page); 1555 1556 out_unlock: 1557 slab_unlock(page); 1558 return; 1559 1560 slab_empty: 1561 if (prior) 1562 /* 1563 * Slab still on the partial list. 
1564 */ 1565 remove_partial(s, page); 1566 1567 slab_unlock(page); 1568 discard_slab(s, page); 1569 return; 1570 1571 debug: 1572 if (!free_debug_processing(s, page, x, addr)) 1573 goto out_unlock; 1574 goto checks_ok; 1575 } 1576 1577 /* 1578 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 1579 * can perform fastpath freeing without additional function calls. 1580 * 1581 * The fastpath is only possible if we are freeing to the current cpu slab 1582 * of this processor. This typically the case if we have just allocated 1583 * the item before. 1584 * 1585 * If fastpath is not possible then fall back to __slab_free where we deal 1586 * with all sorts of special processing. 1587 */ 1588 static void __always_inline slab_free(struct kmem_cache *s, 1589 struct page *page, void *x, void *addr) 1590 { 1591 void **object = (void *)x; 1592 unsigned long flags; 1593 1594 local_irq_save(flags); 1595 if (likely(page == s->cpu_slab[smp_processor_id()] && 1596 !SlabDebug(page))) { 1597 object[page->offset] = page->lockless_freelist; 1598 page->lockless_freelist = object; 1599 } else 1600 __slab_free(s, page, x, addr); 1601 1602 local_irq_restore(flags); 1603 } 1604 1605 void kmem_cache_free(struct kmem_cache *s, void *x) 1606 { 1607 struct page *page; 1608 1609 page = virt_to_head_page(x); 1610 1611 slab_free(s, page, x, __builtin_return_address(0)); 1612 } 1613 EXPORT_SYMBOL(kmem_cache_free); 1614 1615 /* Figure out on which slab object the object resides */ 1616 static struct page *get_object_page(const void *x) 1617 { 1618 struct page *page = virt_to_head_page(x); 1619 1620 if (!PageSlab(page)) 1621 return NULL; 1622 1623 return page; 1624 } 1625 1626 /* 1627 * Object placement in a slab is made very easy because we always start at 1628 * offset 0. If we tune the size of the object to the alignment then we can 1629 * get the required alignment by putting one properly sized object after 1630 * another. 1631 * 1632 * Notice that the allocation order determines the sizes of the per cpu 1633 * caches. Each processor has always one slab available for allocations. 1634 * Increasing the allocation order reduces the number of times that slabs 1635 * must be moved on and off the partial lists and is therefore a factor in 1636 * locking overhead. 1637 */ 1638 1639 /* 1640 * Mininum / Maximum order of slab pages. This influences locking overhead 1641 * and slab fragmentation. A higher order reduces the number of partial slabs 1642 * and increases the number of allocations possible without having to 1643 * take the list_lock. 1644 */ 1645 static int slub_min_order; 1646 static int slub_max_order = DEFAULT_MAX_ORDER; 1647 static int slub_min_objects = DEFAULT_MIN_OBJECTS; 1648 1649 /* 1650 * Merge control. If this is set then no merging of slab caches will occur. 1651 * (Could be removed. This was introduced to pacify the merge skeptics.) 1652 */ 1653 static int slub_nomerge; 1654 1655 /* 1656 * Calculate the order of allocation given an slab object size. 1657 * 1658 * The order of allocation has significant impact on performance and other 1659 * system components. Generally order 0 allocations should be preferred since 1660 * order 0 does not cause fragmentation in the page allocator. Larger objects 1661 * be problematic to put into order 0 slabs because there may be too much 1662 * unused space left. We go to a higher order if more than 1/8th of the slab 1663 * would be wasted. 
1664 * 1665 * In order to reach satisfactory performance we must ensure that a minimum 1666 * number of objects is in one slab. Otherwise we may generate too much 1667 * activity on the partial lists which requires taking the list_lock. This is 1668 * less a concern for large slabs though which are rarely used. 1669 * 1670 * slub_max_order specifies the order where we begin to stop considering the 1671 * number of objects in a slab as critical. If we reach slub_max_order then 1672 * we try to keep the page order as low as possible. So we accept more waste 1673 * of space in favor of a small page order. 1674 * 1675 * Higher order allocations also allow the placement of more objects in a 1676 * slab and thereby reduce object handling overhead. If the user has 1677 * requested a higher mininum order then we start with that one instead of 1678 * the smallest order which will fit the object. 1679 */ 1680 static inline int slab_order(int size, int min_objects, 1681 int max_order, int fract_leftover) 1682 { 1683 int order; 1684 int rem; 1685 1686 for (order = max(slub_min_order, 1687 fls(min_objects * size - 1) - PAGE_SHIFT); 1688 order <= max_order; order++) { 1689 1690 unsigned long slab_size = PAGE_SIZE << order; 1691 1692 if (slab_size < min_objects * size) 1693 continue; 1694 1695 rem = slab_size % size; 1696 1697 if (rem <= slab_size / fract_leftover) 1698 break; 1699 1700 } 1701 1702 return order; 1703 } 1704 1705 static inline int calculate_order(int size) 1706 { 1707 int order; 1708 int min_objects; 1709 int fraction; 1710 1711 /* 1712 * Attempt to find best configuration for a slab. This 1713 * works by first attempting to generate a layout with 1714 * the best configuration and backing off gradually. 1715 * 1716 * First we reduce the acceptable waste in a slab. Then 1717 * we reduce the minimum objects required in a slab. 1718 */ 1719 min_objects = slub_min_objects; 1720 while (min_objects > 1) { 1721 fraction = 8; 1722 while (fraction >= 4) { 1723 order = slab_order(size, min_objects, 1724 slub_max_order, fraction); 1725 if (order <= slub_max_order) 1726 return order; 1727 fraction /= 2; 1728 } 1729 min_objects /= 2; 1730 } 1731 1732 /* 1733 * We were unable to place multiple objects in a slab. Now 1734 * lets see if we can place a single object there. 1735 */ 1736 order = slab_order(size, 1, slub_max_order, 1); 1737 if (order <= slub_max_order) 1738 return order; 1739 1740 /* 1741 * Doh this slab cannot be placed using slub_max_order. 1742 */ 1743 order = slab_order(size, 1, MAX_ORDER, 1); 1744 if (order <= MAX_ORDER) 1745 return order; 1746 return -ENOSYS; 1747 } 1748 1749 /* 1750 * Figure out what the alignment of the objects will be. 1751 */ 1752 static unsigned long calculate_alignment(unsigned long flags, 1753 unsigned long align, unsigned long size) 1754 { 1755 /* 1756 * If the user wants hardware cache aligned objects then 1757 * follow that suggestion if the object is sufficiently 1758 * large. 1759 * 1760 * The hardware cache alignment cannot override the 1761 * specified alignment though. If that is greater 1762 * then use it. 
1763 */ 1764 if ((flags & SLAB_HWCACHE_ALIGN) && 1765 size > cache_line_size() / 2) 1766 return max_t(unsigned long, align, cache_line_size()); 1767 1768 if (align < ARCH_SLAB_MINALIGN) 1769 return ARCH_SLAB_MINALIGN; 1770 1771 return ALIGN(align, sizeof(void *)); 1772 } 1773 1774 static void init_kmem_cache_node(struct kmem_cache_node *n) 1775 { 1776 n->nr_partial = 0; 1777 atomic_long_set(&n->nr_slabs, 0); 1778 spin_lock_init(&n->list_lock); 1779 INIT_LIST_HEAD(&n->partial); 1780 INIT_LIST_HEAD(&n->full); 1781 } 1782 1783 #ifdef CONFIG_NUMA 1784 /* 1785 * No kmalloc_node yet so do it by hand. We know that this is the first 1786 * slab on the node for this slabcache. There are no concurrent accesses 1787 * possible. 1788 * 1789 * Note that this function only works on the kmalloc_node_cache 1790 * when allocating for the kmalloc_node_cache. 1791 */ 1792 static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflags, 1793 int node) 1794 { 1795 struct page *page; 1796 struct kmem_cache_node *n; 1797 1798 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); 1799 1800 page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node); 1801 /* new_slab() disables interupts */ 1802 local_irq_enable(); 1803 1804 BUG_ON(!page); 1805 n = page->freelist; 1806 BUG_ON(!n); 1807 page->freelist = get_freepointer(kmalloc_caches, n); 1808 page->inuse++; 1809 kmalloc_caches->node[node] = n; 1810 setup_object_debug(kmalloc_caches, page, n); 1811 init_kmem_cache_node(n); 1812 atomic_long_inc(&n->nr_slabs); 1813 add_partial(n, page); 1814 return n; 1815 } 1816 1817 static void free_kmem_cache_nodes(struct kmem_cache *s) 1818 { 1819 int node; 1820 1821 for_each_online_node(node) { 1822 struct kmem_cache_node *n = s->node[node]; 1823 if (n && n != &s->local_node) 1824 kmem_cache_free(kmalloc_caches, n); 1825 s->node[node] = NULL; 1826 } 1827 } 1828 1829 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 1830 { 1831 int node; 1832 int local_node; 1833 1834 if (slab_state >= UP) 1835 local_node = page_to_nid(virt_to_page(s)); 1836 else 1837 local_node = 0; 1838 1839 for_each_online_node(node) { 1840 struct kmem_cache_node *n; 1841 1842 if (local_node == node) 1843 n = &s->local_node; 1844 else { 1845 if (slab_state == DOWN) { 1846 n = early_kmem_cache_node_alloc(gfpflags, 1847 node); 1848 continue; 1849 } 1850 n = kmem_cache_alloc_node(kmalloc_caches, 1851 gfpflags, node); 1852 1853 if (!n) { 1854 free_kmem_cache_nodes(s); 1855 return 0; 1856 } 1857 1858 } 1859 s->node[node] = n; 1860 init_kmem_cache_node(n); 1861 } 1862 return 1; 1863 } 1864 #else 1865 static void free_kmem_cache_nodes(struct kmem_cache *s) 1866 { 1867 } 1868 1869 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 1870 { 1871 init_kmem_cache_node(&s->local_node); 1872 return 1; 1873 } 1874 #endif 1875 1876 /* 1877 * calculate_sizes() determines the order and the distribution of data within 1878 * a slab object. 1879 */ 1880 static int calculate_sizes(struct kmem_cache *s) 1881 { 1882 unsigned long flags = s->flags; 1883 unsigned long size = s->objsize; 1884 unsigned long align = s->align; 1885 1886 /* 1887 * Determine if we can poison the object itself. If the user of 1888 * the slab may touch the object after free or before allocation 1889 * then we should never poison the object itself. 
1890 */ 1891 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 1892 !s->ctor) 1893 s->flags |= __OBJECT_POISON; 1894 else 1895 s->flags &= ~__OBJECT_POISON; 1896 1897 /* 1898 * Round up object size to the next word boundary. We can only 1899 * place the free pointer at word boundaries and this determines 1900 * the possible location of the free pointer. 1901 */ 1902 size = ALIGN(size, sizeof(void *)); 1903 1904 #ifdef CONFIG_SLUB_DEBUG 1905 /* 1906 * If we are Redzoning then check if there is some space between the 1907 * end of the object and the free pointer. If not then add an 1908 * additional word to have some bytes to store Redzone information. 1909 */ 1910 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 1911 size += sizeof(void *); 1912 #endif 1913 1914 /* 1915 * With that we have determined the number of bytes in actual use 1916 * by the object. This is the potential offset to the free pointer. 1917 */ 1918 s->inuse = size; 1919 1920 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 1921 s->ctor)) { 1922 /* 1923 * Relocate free pointer after the object if it is not 1924 * permitted to overwrite the first word of the object on 1925 * kmem_cache_free. 1926 * 1927 * This is the case if we do RCU, have a constructor or 1928 * destructor or are poisoning the objects. 1929 */ 1930 s->offset = size; 1931 size += sizeof(void *); 1932 } 1933 1934 #ifdef CONFIG_SLUB_DEBUG 1935 if (flags & SLAB_STORE_USER) 1936 /* 1937 * Need to store information about allocs and frees after 1938 * the object. 1939 */ 1940 size += 2 * sizeof(struct track); 1941 1942 if (flags & SLAB_RED_ZONE) 1943 /* 1944 * Add some empty padding so that we can catch 1945 * overwrites from earlier objects rather than let 1946 * tracking information or the free pointer be 1947 * corrupted if an user writes before the start 1948 * of the object. 1949 */ 1950 size += sizeof(void *); 1951 #endif 1952 1953 /* 1954 * Determine the alignment based on various parameters that the 1955 * user specified and the dynamic determination of cache line size 1956 * on bootup. 1957 */ 1958 align = calculate_alignment(flags, align, s->objsize); 1959 1960 /* 1961 * SLUB stores one object immediately after another beginning from 1962 * offset 0. In order to align the objects we have to simply size 1963 * each object to conform to the alignment. 1964 */ 1965 size = ALIGN(size, align); 1966 s->size = size; 1967 1968 s->order = calculate_order(size); 1969 if (s->order < 0) 1970 return 0; 1971 1972 /* 1973 * Determine the number of objects per slab 1974 */ 1975 s->objects = (PAGE_SIZE << s->order) / size; 1976 1977 /* 1978 * Verify that the number of objects is within permitted limits. 1979 * The page->inuse field is only 16 bit wide! So we cannot have 1980 * more than 64k objects per slab. 
1981 */ 1982 if (!s->objects || s->objects > 65535) 1983 return 0; 1984 return 1; 1985 1986 } 1987 1988 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 1989 const char *name, size_t size, 1990 size_t align, unsigned long flags, 1991 void (*ctor)(void *, struct kmem_cache *, unsigned long)) 1992 { 1993 memset(s, 0, kmem_size); 1994 s->name = name; 1995 s->ctor = ctor; 1996 s->objsize = size; 1997 s->flags = flags; 1998 s->align = align; 1999 kmem_cache_open_debug_check(s); 2000 2001 if (!calculate_sizes(s)) 2002 goto error; 2003 2004 s->refcount = 1; 2005 #ifdef CONFIG_NUMA 2006 s->defrag_ratio = 100; 2007 #endif 2008 2009 if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) 2010 return 1; 2011 error: 2012 if (flags & SLAB_PANIC) 2013 panic("Cannot create slab %s size=%lu realsize=%u " 2014 "order=%u offset=%u flags=%lx\n", 2015 s->name, (unsigned long)size, s->size, s->order, 2016 s->offset, flags); 2017 return 0; 2018 } 2019 EXPORT_SYMBOL(kmem_cache_open); 2020 2021 /* 2022 * Check if a given pointer is valid 2023 */ 2024 int kmem_ptr_validate(struct kmem_cache *s, const void *object) 2025 { 2026 struct page * page; 2027 2028 page = get_object_page(object); 2029 2030 if (!page || s != page->slab) 2031 /* No slab or wrong slab */ 2032 return 0; 2033 2034 if (!check_valid_pointer(s, page, object)) 2035 return 0; 2036 2037 /* 2038 * We could also check if the object is on the slabs freelist. 2039 * But this would be too expensive and it seems that the main 2040 * purpose of kmem_ptr_valid is to check if the object belongs 2041 * to a certain slab. 2042 */ 2043 return 1; 2044 } 2045 EXPORT_SYMBOL(kmem_ptr_validate); 2046 2047 /* 2048 * Determine the size of a slab object 2049 */ 2050 unsigned int kmem_cache_size(struct kmem_cache *s) 2051 { 2052 return s->objsize; 2053 } 2054 EXPORT_SYMBOL(kmem_cache_size); 2055 2056 const char *kmem_cache_name(struct kmem_cache *s) 2057 { 2058 return s->name; 2059 } 2060 EXPORT_SYMBOL(kmem_cache_name); 2061 2062 /* 2063 * Attempt to free all slabs on a node. Return the number of slabs we 2064 * were unable to free. 2065 */ 2066 static int free_list(struct kmem_cache *s, struct kmem_cache_node *n, 2067 struct list_head *list) 2068 { 2069 int slabs_inuse = 0; 2070 unsigned long flags; 2071 struct page *page, *h; 2072 2073 spin_lock_irqsave(&n->list_lock, flags); 2074 list_for_each_entry_safe(page, h, list, lru) 2075 if (!page->inuse) { 2076 list_del(&page->lru); 2077 discard_slab(s, page); 2078 } else 2079 slabs_inuse++; 2080 spin_unlock_irqrestore(&n->list_lock, flags); 2081 return slabs_inuse; 2082 } 2083 2084 /* 2085 * Release all resources used by a slab cache. 
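 *
 * Usage sketch (illustrative, "foo_cache" is a made-up name): a module
 * owning a cache must return every object before tearing it down:
 *
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 *
 * kmem_cache_close() below returns nonzero if some node still has slabs
 * with objects in use; kmem_cache_destroy() then merely WARNs and frees
 * the kmem_cache structure anyway.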
2086 */ 2087 static int kmem_cache_close(struct kmem_cache *s) 2088 { 2089 int node; 2090 2091 flush_all(s); 2092 2093 /* Attempt to free all objects */ 2094 for_each_online_node(node) { 2095 struct kmem_cache_node *n = get_node(s, node); 2096 2097 n->nr_partial -= free_list(s, n, &n->partial); 2098 if (atomic_long_read(&n->nr_slabs)) 2099 return 1; 2100 } 2101 free_kmem_cache_nodes(s); 2102 return 0; 2103 } 2104 2105 /* 2106 * Close a cache and release the kmem_cache structure 2107 * (must be used for caches created using kmem_cache_create) 2108 */ 2109 void kmem_cache_destroy(struct kmem_cache *s) 2110 { 2111 down_write(&slub_lock); 2112 s->refcount--; 2113 if (!s->refcount) { 2114 list_del(&s->list); 2115 if (kmem_cache_close(s)) 2116 WARN_ON(1); 2117 sysfs_slab_remove(s); 2118 kfree(s); 2119 } 2120 up_write(&slub_lock); 2121 } 2122 EXPORT_SYMBOL(kmem_cache_destroy); 2123 2124 /******************************************************************** 2125 * Kmalloc subsystem 2126 *******************************************************************/ 2127 2128 struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned; 2129 EXPORT_SYMBOL(kmalloc_caches); 2130 2131 #ifdef CONFIG_ZONE_DMA 2132 static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1]; 2133 #endif 2134 2135 static int __init setup_slub_min_order(char *str) 2136 { 2137 get_option (&str, &slub_min_order); 2138 2139 return 1; 2140 } 2141 2142 __setup("slub_min_order=", setup_slub_min_order); 2143 2144 static int __init setup_slub_max_order(char *str) 2145 { 2146 get_option (&str, &slub_max_order); 2147 2148 return 1; 2149 } 2150 2151 __setup("slub_max_order=", setup_slub_max_order); 2152 2153 static int __init setup_slub_min_objects(char *str) 2154 { 2155 get_option (&str, &slub_min_objects); 2156 2157 return 1; 2158 } 2159 2160 __setup("slub_min_objects=", setup_slub_min_objects); 2161 2162 static int __init setup_slub_nomerge(char *str) 2163 { 2164 slub_nomerge = 1; 2165 return 1; 2166 } 2167 2168 __setup("slub_nomerge", setup_slub_nomerge); 2169 2170 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, 2171 const char *name, int size, gfp_t gfp_flags) 2172 { 2173 unsigned int flags = 0; 2174 2175 if (gfp_flags & SLUB_DMA) 2176 flags = SLAB_CACHE_DMA; 2177 2178 down_write(&slub_lock); 2179 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2180 flags, NULL)) 2181 goto panic; 2182 2183 list_add(&s->list, &slab_caches); 2184 up_write(&slub_lock); 2185 if (sysfs_slab_add(s)) 2186 goto panic; 2187 return s; 2188 2189 panic: 2190 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2191 } 2192 2193 static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2194 { 2195 int index = kmalloc_index(size); 2196 2197 if (!index) 2198 return NULL; 2199 2200 /* Allocation too large? 
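 *
 * (Illustrative aside, not in the original source: kmalloc_index() yields
 * 0 only for size == 0 and a negative value for sizes that are too large,
 * hence the NULL return above and the BUG_ON below.  Roughly speaking, a
 * hypothetical
 *
 *	buf = kmalloc(100, GFP_KERNEL);
 *
 * is served from the general "kmalloc-128" cache, while
 *
 *	buf = kmalloc(100, GFP_KERNEL | GFP_DMA);
 *
 * ends up in a "kmalloc_dma-128" cache that the CONFIG_ZONE_DMA block
 * below creates on first use.)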
*/ 2201 BUG_ON(index < 0); 2202 2203 #ifdef CONFIG_ZONE_DMA 2204 if ((flags & SLUB_DMA)) { 2205 struct kmem_cache *s; 2206 struct kmem_cache *x; 2207 char *text; 2208 size_t realsize; 2209 2210 s = kmalloc_caches_dma[index]; 2211 if (s) 2212 return s; 2213 2214 /* Dynamically create dma cache */ 2215 x = kmalloc(kmem_size, flags & ~SLUB_DMA); 2216 if (!x) 2217 panic("Unable to allocate memory for dma cache\n"); 2218 2219 if (index <= KMALLOC_SHIFT_HIGH) 2220 realsize = 1 << index; 2221 else { 2222 if (index == 1) 2223 realsize = 96; 2224 else 2225 realsize = 192; 2226 } 2227 2228 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", 2229 (unsigned int)realsize); 2230 s = create_kmalloc_cache(x, text, realsize, flags); 2231 kmalloc_caches_dma[index] = s; 2232 return s; 2233 } 2234 #endif 2235 return &kmalloc_caches[index]; 2236 } 2237 2238 void *__kmalloc(size_t size, gfp_t flags) 2239 { 2240 struct kmem_cache *s = get_slab(size, flags); 2241 2242 if (s) 2243 return slab_alloc(s, flags, -1, __builtin_return_address(0)); 2244 return NULL; 2245 } 2246 EXPORT_SYMBOL(__kmalloc); 2247 2248 #ifdef CONFIG_NUMA 2249 void *__kmalloc_node(size_t size, gfp_t flags, int node) 2250 { 2251 struct kmem_cache *s = get_slab(size, flags); 2252 2253 if (s) 2254 return slab_alloc(s, flags, node, __builtin_return_address(0)); 2255 return NULL; 2256 } 2257 EXPORT_SYMBOL(__kmalloc_node); 2258 #endif 2259 2260 size_t ksize(const void *object) 2261 { 2262 struct page *page = get_object_page(object); 2263 struct kmem_cache *s; 2264 2265 BUG_ON(!page); 2266 s = page->slab; 2267 BUG_ON(!s); 2268 2269 /* 2270 * Debugging requires use of the padding between object 2271 * and whatever may come after it. 2272 */ 2273 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 2274 return s->objsize; 2275 2276 /* 2277 * If we have the need to store the freelist pointer 2278 * back there or track user information then we can 2279 * only use the space before that information. 2280 */ 2281 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 2282 return s->inuse; 2283 2284 /* 2285 * Else we can use all the padding etc for the allocation 2286 */ 2287 return s->size; 2288 } 2289 EXPORT_SYMBOL(ksize); 2290 2291 void kfree(const void *x) 2292 { 2293 struct kmem_cache *s; 2294 struct page *page; 2295 2296 if (!x) 2297 return; 2298 2299 page = virt_to_head_page(x); 2300 s = page->slab; 2301 2302 slab_free(s, page, (void *)x, __builtin_return_address(0)); 2303 } 2304 EXPORT_SYMBOL(kfree); 2305 2306 /* 2307 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2308 * the remaining slabs by the number of items in use. The slabs with the 2309 * most items in use come first. New allocations will then fill those up 2310 * and thus they can be removed from the partial lists. 2311 * 2312 * The slabs with the least items are placed last. This results in them 2313 * being allocated from last increasing the chance that the last objects 2314 * are freed in them. 
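 *
 * Usage sketch (illustrative, "foo_cache" is a made-up name): a subsystem
 * that has just released a large number of objects can hand empty slabs
 * back to the page allocator with
 *
 *	kmem_cache_shrink(foo_cache);
 *
 * which is also what writing 1 to /sys/slab/<cache>/shrink triggers via
 * shrink_store() further down.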
2315 */ 2316 int kmem_cache_shrink(struct kmem_cache *s) 2317 { 2318 int node; 2319 int i; 2320 struct kmem_cache_node *n; 2321 struct page *page; 2322 struct page *t; 2323 struct list_head *slabs_by_inuse = 2324 kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL); 2325 unsigned long flags; 2326 2327 if (!slabs_by_inuse) 2328 return -ENOMEM; 2329 2330 flush_all(s); 2331 for_each_online_node(node) { 2332 n = get_node(s, node); 2333 2334 if (!n->nr_partial) 2335 continue; 2336 2337 for (i = 0; i < s->objects; i++) 2338 INIT_LIST_HEAD(slabs_by_inuse + i); 2339 2340 spin_lock_irqsave(&n->list_lock, flags); 2341 2342 /* 2343 * Build lists indexed by the items in use in each slab. 2344 * 2345 * Note that concurrent frees may occur while we hold the 2346 * list_lock. page->inuse here is the upper limit. 2347 */ 2348 list_for_each_entry_safe(page, t, &n->partial, lru) { 2349 if (!page->inuse && slab_trylock(page)) { 2350 /* 2351 * Must hold slab lock here because slab_free 2352 * may have freed the last object and be 2353 * waiting to release the slab. 2354 */ 2355 list_del(&page->lru); 2356 n->nr_partial--; 2357 slab_unlock(page); 2358 discard_slab(s, page); 2359 } else { 2360 if (n->nr_partial > MAX_PARTIAL) 2361 list_move(&page->lru, 2362 slabs_by_inuse + page->inuse); 2363 } 2364 } 2365 2366 if (n->nr_partial <= MAX_PARTIAL) 2367 goto out; 2368 2369 /* 2370 * Rebuild the partial list with the slabs filled up most 2371 * first and the least used slabs at the end. 2372 */ 2373 for (i = s->objects - 1; i >= 0; i--) 2374 list_splice(slabs_by_inuse + i, n->partial.prev); 2375 2376 out: 2377 spin_unlock_irqrestore(&n->list_lock, flags); 2378 } 2379 2380 kfree(slabs_by_inuse); 2381 return 0; 2382 } 2383 EXPORT_SYMBOL(kmem_cache_shrink); 2384 2385 /** 2386 * krealloc - reallocate memory. The contents will remain unchanged. 2387 * @p: object to reallocate memory for. 2388 * @new_size: how many bytes of memory are required. 2389 * @flags: the type of memory to allocate. 2390 * 2391 * The contents of the object pointed to are preserved up to the 2392 * lesser of the new and old sizes. If @p is %NULL, krealloc() 2393 * behaves exactly like kmalloc(). If @size is 0 and @p is not a 2394 * %NULL pointer, the object pointed to is freed. 2395 */ 2396 void *krealloc(const void *p, size_t new_size, gfp_t flags) 2397 { 2398 void *ret; 2399 size_t ks; 2400 2401 if (unlikely(!p)) 2402 return kmalloc(new_size, flags); 2403 2404 if (unlikely(!new_size)) { 2405 kfree(p); 2406 return NULL; 2407 } 2408 2409 ks = ksize(p); 2410 if (ks >= new_size) 2411 return (void *)p; 2412 2413 ret = kmalloc(new_size, flags); 2414 if (ret) { 2415 memcpy(ret, p, min(new_size, ks)); 2416 kfree(p); 2417 } 2418 return ret; 2419 } 2420 EXPORT_SYMBOL(krealloc); 2421 2422 /******************************************************************** 2423 * Basic setup of slabs 2424 *******************************************************************/ 2425 2426 void __init kmem_cache_init(void) 2427 { 2428 int i; 2429 2430 #ifdef CONFIG_NUMA 2431 /* 2432 * Must first have the slab cache available for the allocations of the 2433 * struct kmem_cache_node's. There is special bootstrap code in 2434 * kmem_cache_open for slab_state == DOWN. 
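 *
 * (Bootstrap note, not part of the original comment: while slab_state is
 * still DOWN the node structure for this very cache cannot come from
 * kmem_cache_alloc_node(), so early_kmem_cache_node_alloc() above carves
 * it out of a freshly allocated slab page by hand.  The ordering below is
 *
 *	slab_state == DOWN	early_kmem_cache_node_alloc() only
 *	slab_state = PARTIAL	kmem_cache_alloc_node() usable for nodes
 *	slab_state = UP		kmalloc()/kasprintf() usable, names fixed up
 *
 * with sysfs registration following later in slab_sysfs_init().)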
2435 */ 2436 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", 2437 sizeof(struct kmem_cache_node), GFP_KERNEL); 2438 kmalloc_caches[0].refcount = -1; 2439 #endif 2440 2441 /* Able to allocate the per node structures */ 2442 slab_state = PARTIAL; 2443 2444 /* Caches that are not of the two-to-the-power-of size */ 2445 create_kmalloc_cache(&kmalloc_caches[1], 2446 "kmalloc-96", 96, GFP_KERNEL); 2447 create_kmalloc_cache(&kmalloc_caches[2], 2448 "kmalloc-192", 192, GFP_KERNEL); 2449 2450 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) 2451 create_kmalloc_cache(&kmalloc_caches[i], 2452 "kmalloc", 1 << i, GFP_KERNEL); 2453 2454 slab_state = UP; 2455 2456 /* Provide the correct kmalloc names now that the caches are up */ 2457 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) 2458 kmalloc_caches[i]. name = 2459 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); 2460 2461 #ifdef CONFIG_SMP 2462 register_cpu_notifier(&slab_notifier); 2463 #endif 2464 2465 kmem_size = offsetof(struct kmem_cache, cpu_slab) + 2466 nr_cpu_ids * sizeof(struct page *); 2467 2468 printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 2469 " Processors=%d, Nodes=%d\n", 2470 KMALLOC_SHIFT_HIGH, cache_line_size(), 2471 slub_min_order, slub_max_order, slub_min_objects, 2472 nr_cpu_ids, nr_node_ids); 2473 } 2474 2475 /* 2476 * Find a mergeable slab cache 2477 */ 2478 static int slab_unmergeable(struct kmem_cache *s) 2479 { 2480 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 2481 return 1; 2482 2483 if (s->ctor) 2484 return 1; 2485 2486 /* 2487 * We may have set a slab to be unmergeable during bootstrap. 2488 */ 2489 if (s->refcount < 0) 2490 return 1; 2491 2492 return 0; 2493 } 2494 2495 static struct kmem_cache *find_mergeable(size_t size, 2496 size_t align, unsigned long flags, 2497 void (*ctor)(void *, struct kmem_cache *, unsigned long)) 2498 { 2499 struct list_head *h; 2500 2501 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 2502 return NULL; 2503 2504 if (ctor) 2505 return NULL; 2506 2507 size = ALIGN(size, sizeof(void *)); 2508 align = calculate_alignment(flags, align, size); 2509 size = ALIGN(size, align); 2510 2511 list_for_each(h, &slab_caches) { 2512 struct kmem_cache *s = 2513 container_of(h, struct kmem_cache, list); 2514 2515 if (slab_unmergeable(s)) 2516 continue; 2517 2518 if (size > s->size) 2519 continue; 2520 2521 if (((flags | slub_debug) & SLUB_MERGE_SAME) != 2522 (s->flags & SLUB_MERGE_SAME)) 2523 continue; 2524 /* 2525 * Check if alignment is compatible. 2526 * Courtesy of Adrian Drzewiecki 2527 */ 2528 if ((s->size & ~(align -1)) != s->size) 2529 continue; 2530 2531 if (s->size - size >= sizeof(void *)) 2532 continue; 2533 2534 return s; 2535 } 2536 return NULL; 2537 } 2538 2539 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 2540 size_t align, unsigned long flags, 2541 void (*ctor)(void *, struct kmem_cache *, unsigned long), 2542 void (*dtor)(void *, struct kmem_cache *, unsigned long)) 2543 { 2544 struct kmem_cache *s; 2545 2546 BUG_ON(dtor); 2547 down_write(&slub_lock); 2548 s = find_mergeable(size, align, flags, ctor); 2549 if (s) { 2550 s->refcount++; 2551 /* 2552 * Adjust the object sizes so that we clear 2553 * the complete object on kzalloc. 
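 *
 * Example (illustrative, hypothetical caches and sizes): if "foo" was
 * created first with object size 52 and no debug flags, a later
 * kmem_cache_create("bar", 56, ...) with compatible flags is merged into
 * it: find_mergeable() returned the existing cache, its refcount was
 * bumped, and the lines below widen objsize to max(52, 56) = 56 so that
 * kmem_cache_zalloc()/kzalloc() clears the whole object either user may
 * touch.  /sys/slab/bar then becomes a symlink to the shared cache via
 * sysfs_slab_alias().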
2554 */ 2555 s->objsize = max(s->objsize, (int)size); 2556 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 2557 if (sysfs_slab_alias(s, name)) 2558 goto err; 2559 } else { 2560 s = kmalloc(kmem_size, GFP_KERNEL); 2561 if (s && kmem_cache_open(s, GFP_KERNEL, name, 2562 size, align, flags, ctor)) { 2563 if (sysfs_slab_add(s)) { 2564 kfree(s); 2565 goto err; 2566 } 2567 list_add(&s->list, &slab_caches); 2568 } else 2569 kfree(s); 2570 } 2571 up_write(&slub_lock); 2572 return s; 2573 2574 err: 2575 up_write(&slub_lock); 2576 if (flags & SLAB_PANIC) 2577 panic("Cannot create slabcache %s\n", name); 2578 else 2579 s = NULL; 2580 return s; 2581 } 2582 EXPORT_SYMBOL(kmem_cache_create); 2583 2584 void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags) 2585 { 2586 void *x; 2587 2588 x = slab_alloc(s, flags, -1, __builtin_return_address(0)); 2589 if (x) 2590 memset(x, 0, s->objsize); 2591 return x; 2592 } 2593 EXPORT_SYMBOL(kmem_cache_zalloc); 2594 2595 #ifdef CONFIG_SMP 2596 static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu) 2597 { 2598 struct list_head *h; 2599 2600 down_read(&slub_lock); 2601 list_for_each(h, &slab_caches) { 2602 struct kmem_cache *s = 2603 container_of(h, struct kmem_cache, list); 2604 2605 func(s, cpu); 2606 } 2607 up_read(&slub_lock); 2608 } 2609 2610 /* 2611 * Use the cpu notifier to insure that the cpu slabs are flushed when 2612 * necessary. 2613 */ 2614 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 2615 unsigned long action, void *hcpu) 2616 { 2617 long cpu = (long)hcpu; 2618 2619 switch (action) { 2620 case CPU_UP_CANCELED: 2621 case CPU_UP_CANCELED_FROZEN: 2622 case CPU_DEAD: 2623 case CPU_DEAD_FROZEN: 2624 for_all_slabs(__flush_cpu_slab, cpu); 2625 break; 2626 default: 2627 break; 2628 } 2629 return NOTIFY_OK; 2630 } 2631 2632 static struct notifier_block __cpuinitdata slab_notifier = 2633 { &slab_cpuup_callback, NULL, 0 }; 2634 2635 #endif 2636 2637 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) 2638 { 2639 struct kmem_cache *s = get_slab(size, gfpflags); 2640 2641 if (!s) 2642 return NULL; 2643 2644 return slab_alloc(s, gfpflags, -1, caller); 2645 } 2646 2647 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 2648 int node, void *caller) 2649 { 2650 struct kmem_cache *s = get_slab(size, gfpflags); 2651 2652 if (!s) 2653 return NULL; 2654 2655 return slab_alloc(s, gfpflags, node, caller); 2656 } 2657 2658 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) 2659 static int validate_slab(struct kmem_cache *s, struct page *page) 2660 { 2661 void *p; 2662 void *addr = page_address(page); 2663 DECLARE_BITMAP(map, s->objects); 2664 2665 if (!check_slab(s, page) || 2666 !on_freelist(s, page, NULL)) 2667 return 0; 2668 2669 /* Now we know that a valid freelist exists */ 2670 bitmap_zero(map, s->objects); 2671 2672 for_each_free_object(p, s, page->freelist) { 2673 set_bit(slab_index(p, s, addr), map); 2674 if (!check_object(s, page, p, 0)) 2675 return 0; 2676 } 2677 2678 for_each_object(p, s, addr) 2679 if (!test_bit(slab_index(p, s, addr), map)) 2680 if (!check_object(s, page, p, 1)) 2681 return 0; 2682 return 1; 2683 } 2684 2685 static void validate_slab_slab(struct kmem_cache *s, struct page *page) 2686 { 2687 if (slab_trylock(page)) { 2688 validate_slab(s, page); 2689 slab_unlock(page); 2690 } else 2691 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", 2692 s->name, page); 2693 2694 if (s->flags & DEBUG_DEFAULT_FLAGS) { 2695 if (!SlabDebug(page)) 2696 printk(KERN_ERR 
"SLUB %s: SlabDebug not set " 2697 "on slab 0x%p\n", s->name, page); 2698 } else { 2699 if (SlabDebug(page)) 2700 printk(KERN_ERR "SLUB %s: SlabDebug set on " 2701 "slab 0x%p\n", s->name, page); 2702 } 2703 } 2704 2705 static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n) 2706 { 2707 unsigned long count = 0; 2708 struct page *page; 2709 unsigned long flags; 2710 2711 spin_lock_irqsave(&n->list_lock, flags); 2712 2713 list_for_each_entry(page, &n->partial, lru) { 2714 validate_slab_slab(s, page); 2715 count++; 2716 } 2717 if (count != n->nr_partial) 2718 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 2719 "counter=%ld\n", s->name, count, n->nr_partial); 2720 2721 if (!(s->flags & SLAB_STORE_USER)) 2722 goto out; 2723 2724 list_for_each_entry(page, &n->full, lru) { 2725 validate_slab_slab(s, page); 2726 count++; 2727 } 2728 if (count != atomic_long_read(&n->nr_slabs)) 2729 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 2730 "counter=%ld\n", s->name, count, 2731 atomic_long_read(&n->nr_slabs)); 2732 2733 out: 2734 spin_unlock_irqrestore(&n->list_lock, flags); 2735 return count; 2736 } 2737 2738 static unsigned long validate_slab_cache(struct kmem_cache *s) 2739 { 2740 int node; 2741 unsigned long count = 0; 2742 2743 flush_all(s); 2744 for_each_online_node(node) { 2745 struct kmem_cache_node *n = get_node(s, node); 2746 2747 count += validate_slab_node(s, n); 2748 } 2749 return count; 2750 } 2751 2752 #ifdef SLUB_RESILIENCY_TEST 2753 static void resiliency_test(void) 2754 { 2755 u8 *p; 2756 2757 printk(KERN_ERR "SLUB resiliency testing\n"); 2758 printk(KERN_ERR "-----------------------\n"); 2759 printk(KERN_ERR "A. Corruption after allocation\n"); 2760 2761 p = kzalloc(16, GFP_KERNEL); 2762 p[16] = 0x12; 2763 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" 2764 " 0x12->0x%p\n\n", p + 16); 2765 2766 validate_slab_cache(kmalloc_caches + 4); 2767 2768 /* Hmmm... The next two are dangerous */ 2769 p = kzalloc(32, GFP_KERNEL); 2770 p[32 + sizeof(void *)] = 0x34; 2771 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" 2772 " 0x34 -> -0x%p\n", p); 2773 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); 2774 2775 validate_slab_cache(kmalloc_caches + 5); 2776 p = kzalloc(64, GFP_KERNEL); 2777 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 2778 *p = 0x56; 2779 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 2780 p); 2781 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); 2782 validate_slab_cache(kmalloc_caches + 6); 2783 2784 printk(KERN_ERR "\nB. Corruption after free\n"); 2785 p = kzalloc(128, GFP_KERNEL); 2786 kfree(p); 2787 *p = 0x78; 2788 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 2789 validate_slab_cache(kmalloc_caches + 7); 2790 2791 p = kzalloc(256, GFP_KERNEL); 2792 kfree(p); 2793 p[50] = 0x9a; 2794 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); 2795 validate_slab_cache(kmalloc_caches + 8); 2796 2797 p = kzalloc(512, GFP_KERNEL); 2798 kfree(p); 2799 p[512] = 0xab; 2800 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 2801 validate_slab_cache(kmalloc_caches + 9); 2802 } 2803 #else 2804 static void resiliency_test(void) {}; 2805 #endif 2806 2807 /* 2808 * Generate lists of code addresses where slabcache objects are allocated 2809 * and freed. 
2810 */ 2811 2812 struct location { 2813 unsigned long count; 2814 void *addr; 2815 long long sum_time; 2816 long min_time; 2817 long max_time; 2818 long min_pid; 2819 long max_pid; 2820 cpumask_t cpus; 2821 nodemask_t nodes; 2822 }; 2823 2824 struct loc_track { 2825 unsigned long max; 2826 unsigned long count; 2827 struct location *loc; 2828 }; 2829 2830 static void free_loc_track(struct loc_track *t) 2831 { 2832 if (t->max) 2833 free_pages((unsigned long)t->loc, 2834 get_order(sizeof(struct location) * t->max)); 2835 } 2836 2837 static int alloc_loc_track(struct loc_track *t, unsigned long max) 2838 { 2839 struct location *l; 2840 int order; 2841 2842 if (!max) 2843 max = PAGE_SIZE / sizeof(struct location); 2844 2845 order = get_order(sizeof(struct location) * max); 2846 2847 l = (void *)__get_free_pages(GFP_KERNEL, order); 2848 2849 if (!l) 2850 return 0; 2851 2852 if (t->count) { 2853 memcpy(l, t->loc, sizeof(struct location) * t->count); 2854 free_loc_track(t); 2855 } 2856 t->max = max; 2857 t->loc = l; 2858 return 1; 2859 } 2860 2861 static int add_location(struct loc_track *t, struct kmem_cache *s, 2862 const struct track *track) 2863 { 2864 long start, end, pos; 2865 struct location *l; 2866 void *caddr; 2867 unsigned long age = jiffies - track->when; 2868 2869 start = -1; 2870 end = t->count; 2871 2872 for ( ; ; ) { 2873 pos = start + (end - start + 1) / 2; 2874 2875 /* 2876 * There is nothing at "end". If we end up there 2877 * we need to add something to before end. 2878 */ 2879 if (pos == end) 2880 break; 2881 2882 caddr = t->loc[pos].addr; 2883 if (track->addr == caddr) { 2884 2885 l = &t->loc[pos]; 2886 l->count++; 2887 if (track->when) { 2888 l->sum_time += age; 2889 if (age < l->min_time) 2890 l->min_time = age; 2891 if (age > l->max_time) 2892 l->max_time = age; 2893 2894 if (track->pid < l->min_pid) 2895 l->min_pid = track->pid; 2896 if (track->pid > l->max_pid) 2897 l->max_pid = track->pid; 2898 2899 cpu_set(track->cpu, l->cpus); 2900 } 2901 node_set(page_to_nid(virt_to_page(track)), l->nodes); 2902 return 1; 2903 } 2904 2905 if (track->addr < caddr) 2906 end = pos; 2907 else 2908 start = pos; 2909 } 2910 2911 /* 2912 * Not found. Insert new tracking element. 
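 *
 * (Worked example, illustrative: with tracked addresses { 0x100, 0x200,
 * 0x300 } and a new track->addr of 0x250, the search above terminates
 * with pos == 2; the entries from slot 2 onwards are shifted up by the
 * memmove() below and the new element is written to slot 2, keeping
 * t->loc[] sorted by address.)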
2913 */ 2914 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max)) 2915 return 0; 2916 2917 l = t->loc + pos; 2918 if (pos < t->count) 2919 memmove(l + 1, l, 2920 (t->count - pos) * sizeof(struct location)); 2921 t->count++; 2922 l->count = 1; 2923 l->addr = track->addr; 2924 l->sum_time = age; 2925 l->min_time = age; 2926 l->max_time = age; 2927 l->min_pid = track->pid; 2928 l->max_pid = track->pid; 2929 cpus_clear(l->cpus); 2930 cpu_set(track->cpu, l->cpus); 2931 nodes_clear(l->nodes); 2932 node_set(page_to_nid(virt_to_page(track)), l->nodes); 2933 return 1; 2934 } 2935 2936 static void process_slab(struct loc_track *t, struct kmem_cache *s, 2937 struct page *page, enum track_item alloc) 2938 { 2939 void *addr = page_address(page); 2940 DECLARE_BITMAP(map, s->objects); 2941 void *p; 2942 2943 bitmap_zero(map, s->objects); 2944 for_each_free_object(p, s, page->freelist) 2945 set_bit(slab_index(p, s, addr), map); 2946 2947 for_each_object(p, s, addr) 2948 if (!test_bit(slab_index(p, s, addr), map)) 2949 add_location(t, s, get_track(s, p, alloc)); 2950 } 2951 2952 static int list_locations(struct kmem_cache *s, char *buf, 2953 enum track_item alloc) 2954 { 2955 int n = 0; 2956 unsigned long i; 2957 struct loc_track t; 2958 int node; 2959 2960 t.count = 0; 2961 t.max = 0; 2962 2963 /* Push back cpu slabs */ 2964 flush_all(s); 2965 2966 for_each_online_node(node) { 2967 struct kmem_cache_node *n = get_node(s, node); 2968 unsigned long flags; 2969 struct page *page; 2970 2971 if (!atomic_read(&n->nr_slabs)) 2972 continue; 2973 2974 spin_lock_irqsave(&n->list_lock, flags); 2975 list_for_each_entry(page, &n->partial, lru) 2976 process_slab(&t, s, page, alloc); 2977 list_for_each_entry(page, &n->full, lru) 2978 process_slab(&t, s, page, alloc); 2979 spin_unlock_irqrestore(&n->list_lock, flags); 2980 } 2981 2982 for (i = 0; i < t.count; i++) { 2983 struct location *l = &t.loc[i]; 2984 2985 if (n > PAGE_SIZE - 100) 2986 break; 2987 n += sprintf(buf + n, "%7ld ", l->count); 2988 2989 if (l->addr) 2990 n += sprint_symbol(buf + n, (unsigned long)l->addr); 2991 else 2992 n += sprintf(buf + n, "<not-available>"); 2993 2994 if (l->sum_time != l->min_time) { 2995 unsigned long remainder; 2996 2997 n += sprintf(buf + n, " age=%ld/%ld/%ld", 2998 l->min_time, 2999 div_long_long_rem(l->sum_time, l->count, &remainder), 3000 l->max_time); 3001 } else 3002 n += sprintf(buf + n, " age=%ld", 3003 l->min_time); 3004 3005 if (l->min_pid != l->max_pid) 3006 n += sprintf(buf + n, " pid=%ld-%ld", 3007 l->min_pid, l->max_pid); 3008 else 3009 n += sprintf(buf + n, " pid=%ld", 3010 l->min_pid); 3011 3012 if (num_online_cpus() > 1 && !cpus_empty(l->cpus)) { 3013 n += sprintf(buf + n, " cpus="); 3014 n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50, 3015 l->cpus); 3016 } 3017 3018 if (num_online_nodes() > 1 && !nodes_empty(l->nodes)) { 3019 n += sprintf(buf + n, " nodes="); 3020 n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50, 3021 l->nodes); 3022 } 3023 3024 n += sprintf(buf + n, "\n"); 3025 } 3026 3027 free_loc_track(&t); 3028 if (!t.count) 3029 n += sprintf(buf, "No data\n"); 3030 return n; 3031 } 3032 3033 static unsigned long count_partial(struct kmem_cache_node *n) 3034 { 3035 unsigned long flags; 3036 unsigned long x = 0; 3037 struct page *page; 3038 3039 spin_lock_irqsave(&n->list_lock, flags); 3040 list_for_each_entry(page, &n->partial, lru) 3041 x += page->inuse; 3042 spin_unlock_irqrestore(&n->list_lock, flags); 3043 return x; 3044 } 3045 3046 enum slab_stat_type { 3047 SL_FULL, 3048 SL_PARTIAL, 3049 
SL_CPU, 3050 SL_OBJECTS 3051 }; 3052 3053 #define SO_FULL (1 << SL_FULL) 3054 #define SO_PARTIAL (1 << SL_PARTIAL) 3055 #define SO_CPU (1 << SL_CPU) 3056 #define SO_OBJECTS (1 << SL_OBJECTS) 3057 3058 static unsigned long slab_objects(struct kmem_cache *s, 3059 char *buf, unsigned long flags) 3060 { 3061 unsigned long total = 0; 3062 int cpu; 3063 int node; 3064 int x; 3065 unsigned long *nodes; 3066 unsigned long *per_cpu; 3067 3068 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 3069 per_cpu = nodes + nr_node_ids; 3070 3071 for_each_possible_cpu(cpu) { 3072 struct page *page = s->cpu_slab[cpu]; 3073 int node; 3074 3075 if (page) { 3076 node = page_to_nid(page); 3077 if (flags & SO_CPU) { 3078 int x = 0; 3079 3080 if (flags & SO_OBJECTS) 3081 x = page->inuse; 3082 else 3083 x = 1; 3084 total += x; 3085 nodes[node] += x; 3086 } 3087 per_cpu[node]++; 3088 } 3089 } 3090 3091 for_each_online_node(node) { 3092 struct kmem_cache_node *n = get_node(s, node); 3093 3094 if (flags & SO_PARTIAL) { 3095 if (flags & SO_OBJECTS) 3096 x = count_partial(n); 3097 else 3098 x = n->nr_partial; 3099 total += x; 3100 nodes[node] += x; 3101 } 3102 3103 if (flags & SO_FULL) { 3104 int full_slabs = atomic_read(&n->nr_slabs) 3105 - per_cpu[node] 3106 - n->nr_partial; 3107 3108 if (flags & SO_OBJECTS) 3109 x = full_slabs * s->objects; 3110 else 3111 x = full_slabs; 3112 total += x; 3113 nodes[node] += x; 3114 } 3115 } 3116 3117 x = sprintf(buf, "%lu", total); 3118 #ifdef CONFIG_NUMA 3119 for_each_online_node(node) 3120 if (nodes[node]) 3121 x += sprintf(buf + x, " N%d=%lu", 3122 node, nodes[node]); 3123 #endif 3124 kfree(nodes); 3125 return x + sprintf(buf + x, "\n"); 3126 } 3127 3128 static int any_slab_objects(struct kmem_cache *s) 3129 { 3130 int node; 3131 int cpu; 3132 3133 for_each_possible_cpu(cpu) 3134 if (s->cpu_slab[cpu]) 3135 return 1; 3136 3137 for_each_node(node) { 3138 struct kmem_cache_node *n = get_node(s, node); 3139 3140 if (n->nr_partial || atomic_read(&n->nr_slabs)) 3141 return 1; 3142 } 3143 return 0; 3144 } 3145 3146 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 3147 #define to_slab(n) container_of(n, struct kmem_cache, kobj); 3148 3149 struct slab_attribute { 3150 struct attribute attr; 3151 ssize_t (*show)(struct kmem_cache *s, char *buf); 3152 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 3153 }; 3154 3155 #define SLAB_ATTR_RO(_name) \ 3156 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 3157 3158 #define SLAB_ATTR(_name) \ 3159 static struct slab_attribute _name##_attr = \ 3160 __ATTR(_name, 0644, _name##_show, _name##_store) 3161 3162 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 3163 { 3164 return sprintf(buf, "%d\n", s->size); 3165 } 3166 SLAB_ATTR_RO(slab_size); 3167 3168 static ssize_t align_show(struct kmem_cache *s, char *buf) 3169 { 3170 return sprintf(buf, "%d\n", s->align); 3171 } 3172 SLAB_ATTR_RO(align); 3173 3174 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 3175 { 3176 return sprintf(buf, "%d\n", s->objsize); 3177 } 3178 SLAB_ATTR_RO(object_size); 3179 3180 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 3181 { 3182 return sprintf(buf, "%d\n", s->objects); 3183 } 3184 SLAB_ATTR_RO(objs_per_slab); 3185 3186 static ssize_t order_show(struct kmem_cache *s, char *buf) 3187 { 3188 return sprintf(buf, "%d\n", s->order); 3189 } 3190 SLAB_ATTR_RO(order); 3191 3192 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 3193 { 3194 if (s->ctor) { 
3195 int n = sprint_symbol(buf, (unsigned long)s->ctor); 3196 3197 return n + sprintf(buf + n, "\n"); 3198 } 3199 return 0; 3200 } 3201 SLAB_ATTR_RO(ctor); 3202 3203 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3204 { 3205 return sprintf(buf, "%d\n", s->refcount - 1); 3206 } 3207 SLAB_ATTR_RO(aliases); 3208 3209 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 3210 { 3211 return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU); 3212 } 3213 SLAB_ATTR_RO(slabs); 3214 3215 static ssize_t partial_show(struct kmem_cache *s, char *buf) 3216 { 3217 return slab_objects(s, buf, SO_PARTIAL); 3218 } 3219 SLAB_ATTR_RO(partial); 3220 3221 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 3222 { 3223 return slab_objects(s, buf, SO_CPU); 3224 } 3225 SLAB_ATTR_RO(cpu_slabs); 3226 3227 static ssize_t objects_show(struct kmem_cache *s, char *buf) 3228 { 3229 return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS); 3230 } 3231 SLAB_ATTR_RO(objects); 3232 3233 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 3234 { 3235 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 3236 } 3237 3238 static ssize_t sanity_checks_store(struct kmem_cache *s, 3239 const char *buf, size_t length) 3240 { 3241 s->flags &= ~SLAB_DEBUG_FREE; 3242 if (buf[0] == '1') 3243 s->flags |= SLAB_DEBUG_FREE; 3244 return length; 3245 } 3246 SLAB_ATTR(sanity_checks); 3247 3248 static ssize_t trace_show(struct kmem_cache *s, char *buf) 3249 { 3250 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 3251 } 3252 3253 static ssize_t trace_store(struct kmem_cache *s, const char *buf, 3254 size_t length) 3255 { 3256 s->flags &= ~SLAB_TRACE; 3257 if (buf[0] == '1') 3258 s->flags |= SLAB_TRACE; 3259 return length; 3260 } 3261 SLAB_ATTR(trace); 3262 3263 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 3264 { 3265 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 3266 } 3267 3268 static ssize_t reclaim_account_store(struct kmem_cache *s, 3269 const char *buf, size_t length) 3270 { 3271 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 3272 if (buf[0] == '1') 3273 s->flags |= SLAB_RECLAIM_ACCOUNT; 3274 return length; 3275 } 3276 SLAB_ATTR(reclaim_account); 3277 3278 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 3279 { 3280 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 3281 } 3282 SLAB_ATTR_RO(hwcache_align); 3283 3284 #ifdef CONFIG_ZONE_DMA 3285 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 3286 { 3287 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 3288 } 3289 SLAB_ATTR_RO(cache_dma); 3290 #endif 3291 3292 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 3293 { 3294 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 3295 } 3296 SLAB_ATTR_RO(destroy_by_rcu); 3297 3298 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 3299 { 3300 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 3301 } 3302 3303 static ssize_t red_zone_store(struct kmem_cache *s, 3304 const char *buf, size_t length) 3305 { 3306 if (any_slab_objects(s)) 3307 return -EBUSY; 3308 3309 s->flags &= ~SLAB_RED_ZONE; 3310 if (buf[0] == '1') 3311 s->flags |= SLAB_RED_ZONE; 3312 calculate_sizes(s); 3313 return length; 3314 } 3315 SLAB_ATTR(red_zone); 3316 3317 static ssize_t poison_show(struct kmem_cache *s, char *buf) 3318 { 3319 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 3320 } 3321 3322 static ssize_t poison_store(struct kmem_cache *s, 3323 const char *buf, 
size_t length) 3324 { 3325 if (any_slab_objects(s)) 3326 return -EBUSY; 3327 3328 s->flags &= ~SLAB_POISON; 3329 if (buf[0] == '1') 3330 s->flags |= SLAB_POISON; 3331 calculate_sizes(s); 3332 return length; 3333 } 3334 SLAB_ATTR(poison); 3335 3336 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 3337 { 3338 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 3339 } 3340 3341 static ssize_t store_user_store(struct kmem_cache *s, 3342 const char *buf, size_t length) 3343 { 3344 if (any_slab_objects(s)) 3345 return -EBUSY; 3346 3347 s->flags &= ~SLAB_STORE_USER; 3348 if (buf[0] == '1') 3349 s->flags |= SLAB_STORE_USER; 3350 calculate_sizes(s); 3351 return length; 3352 } 3353 SLAB_ATTR(store_user); 3354 3355 static ssize_t validate_show(struct kmem_cache *s, char *buf) 3356 { 3357 return 0; 3358 } 3359 3360 static ssize_t validate_store(struct kmem_cache *s, 3361 const char *buf, size_t length) 3362 { 3363 if (buf[0] == '1') 3364 validate_slab_cache(s); 3365 else 3366 return -EINVAL; 3367 return length; 3368 } 3369 SLAB_ATTR(validate); 3370 3371 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 3372 { 3373 return 0; 3374 } 3375 3376 static ssize_t shrink_store(struct kmem_cache *s, 3377 const char *buf, size_t length) 3378 { 3379 if (buf[0] == '1') { 3380 int rc = kmem_cache_shrink(s); 3381 3382 if (rc) 3383 return rc; 3384 } else 3385 return -EINVAL; 3386 return length; 3387 } 3388 SLAB_ATTR(shrink); 3389 3390 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 3391 { 3392 if (!(s->flags & SLAB_STORE_USER)) 3393 return -ENOSYS; 3394 return list_locations(s, buf, TRACK_ALLOC); 3395 } 3396 SLAB_ATTR_RO(alloc_calls); 3397 3398 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 3399 { 3400 if (!(s->flags & SLAB_STORE_USER)) 3401 return -ENOSYS; 3402 return list_locations(s, buf, TRACK_FREE); 3403 } 3404 SLAB_ATTR_RO(free_calls); 3405 3406 #ifdef CONFIG_NUMA 3407 static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf) 3408 { 3409 return sprintf(buf, "%d\n", s->defrag_ratio / 10); 3410 } 3411 3412 static ssize_t defrag_ratio_store(struct kmem_cache *s, 3413 const char *buf, size_t length) 3414 { 3415 int n = simple_strtoul(buf, NULL, 10); 3416 3417 if (n < 100) 3418 s->defrag_ratio = n * 10; 3419 return length; 3420 } 3421 SLAB_ATTR(defrag_ratio); 3422 #endif 3423 3424 static struct attribute * slab_attrs[] = { 3425 &slab_size_attr.attr, 3426 &object_size_attr.attr, 3427 &objs_per_slab_attr.attr, 3428 &order_attr.attr, 3429 &objects_attr.attr, 3430 &slabs_attr.attr, 3431 &partial_attr.attr, 3432 &cpu_slabs_attr.attr, 3433 &ctor_attr.attr, 3434 &aliases_attr.attr, 3435 &align_attr.attr, 3436 &sanity_checks_attr.attr, 3437 &trace_attr.attr, 3438 &hwcache_align_attr.attr, 3439 &reclaim_account_attr.attr, 3440 &destroy_by_rcu_attr.attr, 3441 &red_zone_attr.attr, 3442 &poison_attr.attr, 3443 &store_user_attr.attr, 3444 &validate_attr.attr, 3445 &shrink_attr.attr, 3446 &alloc_calls_attr.attr, 3447 &free_calls_attr.attr, 3448 #ifdef CONFIG_ZONE_DMA 3449 &cache_dma_attr.attr, 3450 #endif 3451 #ifdef CONFIG_NUMA 3452 &defrag_ratio_attr.attr, 3453 #endif 3454 NULL 3455 }; 3456 3457 static struct attribute_group slab_attr_group = { 3458 .attrs = slab_attrs, 3459 }; 3460 3461 static ssize_t slab_attr_show(struct kobject *kobj, 3462 struct attribute *attr, 3463 char *buf) 3464 { 3465 struct slab_attribute *attribute; 3466 struct kmem_cache *s; 3467 int err; 3468 3469 attribute = to_slab_attr(attr); 3470 s = to_slab(kobj); 3471 3472 if 
(!attribute->show) 3473 return -EIO; 3474 3475 err = attribute->show(s, buf); 3476 3477 return err; 3478 } 3479 3480 static ssize_t slab_attr_store(struct kobject *kobj, 3481 struct attribute *attr, 3482 const char *buf, size_t len) 3483 { 3484 struct slab_attribute *attribute; 3485 struct kmem_cache *s; 3486 int err; 3487 3488 attribute = to_slab_attr(attr); 3489 s = to_slab(kobj); 3490 3491 if (!attribute->store) 3492 return -EIO; 3493 3494 err = attribute->store(s, buf, len); 3495 3496 return err; 3497 } 3498 3499 static struct sysfs_ops slab_sysfs_ops = { 3500 .show = slab_attr_show, 3501 .store = slab_attr_store, 3502 }; 3503 3504 static struct kobj_type slab_ktype = { 3505 .sysfs_ops = &slab_sysfs_ops, 3506 }; 3507 3508 static int uevent_filter(struct kset *kset, struct kobject *kobj) 3509 { 3510 struct kobj_type *ktype = get_ktype(kobj); 3511 3512 if (ktype == &slab_ktype) 3513 return 1; 3514 return 0; 3515 } 3516 3517 static struct kset_uevent_ops slab_uevent_ops = { 3518 .filter = uevent_filter, 3519 }; 3520 3521 decl_subsys(slab, &slab_ktype, &slab_uevent_ops); 3522 3523 #define ID_STR_LENGTH 64 3524 3525 /* Create a unique string id for a slab cache: 3526 * format 3527 * :[flags-]size:[memory address of kmemcache] 3528 */ 3529 static char *create_unique_id(struct kmem_cache *s) 3530 { 3531 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 3532 char *p = name; 3533 3534 BUG_ON(!name); 3535 3536 *p++ = ':'; 3537 /* 3538 * First flags affecting slabcache operations. We will only 3539 * get here for aliasable slabs so we do not need to support 3540 * too many flags. The flags here must cover all flags that 3541 * are matched during merging to guarantee that the id is 3542 * unique. 3543 */ 3544 if (s->flags & SLAB_CACHE_DMA) 3545 *p++ = 'd'; 3546 if (s->flags & SLAB_RECLAIM_ACCOUNT) 3547 *p++ = 'a'; 3548 if (s->flags & SLAB_DEBUG_FREE) 3549 *p++ = 'F'; 3550 if (p != name + 1) 3551 *p++ = '-'; 3552 p += sprintf(p, "%07d", s->size); 3553 BUG_ON(p > name + ID_STR_LENGTH - 1); 3554 return name; 3555 } 3556 3557 static int sysfs_slab_add(struct kmem_cache *s) 3558 { 3559 int err; 3560 const char *name; 3561 int unmergeable; 3562 3563 if (slab_state < SYSFS) 3564 /* Defer until later */ 3565 return 0; 3566 3567 unmergeable = slab_unmergeable(s); 3568 if (unmergeable) { 3569 /* 3570 * Slabcache can never be merged so we can use the name proper. 3571 * This is typically the case for debug situations. In that 3572 * case we can catch duplicate names easily. 3573 */ 3574 sysfs_remove_link(&slab_subsys.kobj, s->name); 3575 name = s->name; 3576 } else { 3577 /* 3578 * Create a unique name for the slab as a target 3579 * for the symlinks. 3580 */ 3581 name = create_unique_id(s); 3582 } 3583 3584 kobj_set_kset_s(s, slab_subsys); 3585 kobject_set_name(&s->kobj, name); 3586 kobject_init(&s->kobj); 3587 err = kobject_add(&s->kobj); 3588 if (err) 3589 return err; 3590 3591 err = sysfs_create_group(&s->kobj, &slab_attr_group); 3592 if (err) 3593 return err; 3594 kobject_uevent(&s->kobj, KOBJ_ADD); 3595 if (!unmergeable) { 3596 /* Setup first alias */ 3597 sysfs_slab_alias(s, s->name); 3598 kfree(name); 3599 } 3600 return 0; 3601 } 3602 3603 static void sysfs_slab_remove(struct kmem_cache *s) 3604 { 3605 kobject_uevent(&s->kobj, KOBJ_REMOVE); 3606 kobject_del(&s->kobj); 3607 } 3608 3609 /* 3610 * Need to buffer aliases during bootup until sysfs becomes 3611 * available lest we loose that information. 
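 *
 * (Illustrative aside: aliases requested before slab_state reaches SYSFS
 * are queued on the alias_list below and replayed from slab_sysfs_init().
 * Once sysfs is up, a mergeable 192 byte DMA cache gets the directory
 * name ":d-0000192" from create_unique_id() above, a plain mergeable
 * cache of the same size gets ":0000192", and each alias such as
 * /sys/slab/bar is just a symlink to that directory.)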
3612 */ 3613 struct saved_alias { 3614 struct kmem_cache *s; 3615 const char *name; 3616 struct saved_alias *next; 3617 }; 3618 3619 struct saved_alias *alias_list; 3620 3621 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 3622 { 3623 struct saved_alias *al; 3624 3625 if (slab_state == SYSFS) { 3626 /* 3627 * If we have a leftover link then remove it. 3628 */ 3629 sysfs_remove_link(&slab_subsys.kobj, name); 3630 return sysfs_create_link(&slab_subsys.kobj, 3631 &s->kobj, name); 3632 } 3633 3634 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 3635 if (!al) 3636 return -ENOMEM; 3637 3638 al->s = s; 3639 al->name = name; 3640 al->next = alias_list; 3641 alias_list = al; 3642 return 0; 3643 } 3644 3645 static int __init slab_sysfs_init(void) 3646 { 3647 struct list_head *h; 3648 int err; 3649 3650 err = subsystem_register(&slab_subsys); 3651 if (err) { 3652 printk(KERN_ERR "Cannot register slab subsystem.\n"); 3653 return -ENOSYS; 3654 } 3655 3656 slab_state = SYSFS; 3657 3658 list_for_each(h, &slab_caches) { 3659 struct kmem_cache *s = 3660 container_of(h, struct kmem_cache, list); 3661 3662 err = sysfs_slab_add(s); 3663 BUG_ON(err); 3664 } 3665 3666 while (alias_list) { 3667 struct saved_alias *al = alias_list; 3668 3669 alias_list = alias_list->next; 3670 err = sysfs_slab_alias(al->s, al->name); 3671 BUG_ON(err); 3672 kfree(al); 3673 } 3674 3675 resiliency_test(); 3676 return 0; 3677 } 3678 3679 __initcall(slab_sysfs_init); 3680 #endif 3681
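
/*
 * (Illustrative summary, not part of the original file: slab_sysfs_init()
 * runs as an __initcall once sysfs is available, registers every cache
 * created so far and replays the saved aliases, so after boot each cache
 * appears as a directory under /sys/slab/ with the attribute files
 * defined above, e.g.
 *
 *	/sys/slab/kmalloc-128/order
 *	/sys/slab/kmalloc-128/objs_per_slab
 *	/sys/slab/kmalloc-128/validate
 *
 * where writing 1 to "validate" triggers validate_slab_cache() for that
 * cache.)
 */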