// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects:
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->inuse		-> Number of objects in use
 *	C. page->objects	-> Number of objects in page
 *	D. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list except per cpu partial list. The processor that froze the
 *   slab is the one who can perform list operations on the page. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists, nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * page->frozen		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slab() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
	return ptr;
#endif
}

/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	object = kasan_reset_tag(object);
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						      unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

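	/* Optionally log the failed update; enable SLUB_DEBUG_CMPXCHG above to see it. */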
#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
	__acquires(&object_map_lock)
{
	void *p;
	void *addr = page_address(page);

	VM_BUG_ON(!irqs_disabled());

	spin_lock(&object_map_lock);

	bitmap_zero(object_map, page->objects);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), object_map);

	return object_map;
}

static void put_map(unsigned long *map) __releases(&object_map_lock)
{
	VM_BUG_ON(map != object_map);
	spin_unlock(&object_map_lock);
}

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
			16, 1, addr, length, 1);
	metadata_access_disable();
}

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		unsigned int nr_entries;

		metadata_access_enable();
		nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
					      TRACK_ADDRS_COUNT, 3);
		metadata_access_disable();

		if (nr_entries < TRACK_ADDRS_COUNT)
			p->addrs[nr_entries] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else {
		memset(p, 0, sizeof(struct track));
	}
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_page_info(struct page *page)
{
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n",
	       page, page->objects, page->inuse, page->freelist,
	       page->flags, &page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	va_end(args);
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, page, nextfree) && freelist) {
		object_err(s, page, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = page_address(page);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault - addr,
					fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
848 */ 849 850 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 851 { 852 unsigned long off = get_info_end(s); /* The end of info */ 853 854 if (s->flags & SLAB_STORE_USER) 855 /* We also have user information there */ 856 off += 2 * sizeof(struct track); 857 858 off += kasan_metadata_size(s); 859 860 if (size_from_object(s) == off) 861 return 1; 862 863 return check_bytes_and_report(s, page, p, "Object padding", 864 p + off, POISON_INUSE, size_from_object(s) - off); 865 } 866 867 /* Check the pad bytes at the end of a slab page */ 868 static int slab_pad_check(struct kmem_cache *s, struct page *page) 869 { 870 u8 *start; 871 u8 *fault; 872 u8 *end; 873 u8 *pad; 874 int length; 875 int remainder; 876 877 if (!(s->flags & SLAB_POISON)) 878 return 1; 879 880 start = page_address(page); 881 length = page_size(page); 882 end = start + length; 883 remainder = length % s->size; 884 if (!remainder) 885 return 1; 886 887 pad = end - remainder; 888 metadata_access_enable(); 889 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder); 890 metadata_access_disable(); 891 if (!fault) 892 return 1; 893 while (end > fault && end[-1] == POISON_INUSE) 894 end--; 895 896 slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu", 897 fault, end - 1, fault - start); 898 print_section(KERN_ERR, "Padding ", pad, remainder); 899 900 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); 901 return 0; 902 } 903 904 static int check_object(struct kmem_cache *s, struct page *page, 905 void *object, u8 val) 906 { 907 u8 *p = object; 908 u8 *endobject = object + s->object_size; 909 910 if (s->flags & SLAB_RED_ZONE) { 911 if (!check_bytes_and_report(s, page, object, "Redzone", 912 object - s->red_left_pad, val, s->red_left_pad)) 913 return 0; 914 915 if (!check_bytes_and_report(s, page, object, "Redzone", 916 endobject, val, s->inuse - s->object_size)) 917 return 0; 918 } else { 919 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 920 check_bytes_and_report(s, page, p, "Alignment padding", 921 endobject, POISON_INUSE, 922 s->inuse - s->object_size); 923 } 924 } 925 926 if (s->flags & SLAB_POISON) { 927 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && 928 (!check_bytes_and_report(s, page, p, "Poison", p, 929 POISON_FREE, s->object_size - 1) || 930 !check_bytes_and_report(s, page, p, "Poison", 931 p + s->object_size - 1, POISON_END, 1))) 932 return 0; 933 /* 934 * check_pad_bytes cleans up on its own. 935 */ 936 check_pad_bytes(s, page, p); 937 } 938 939 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) 940 /* 941 * Object and freepointer overlap. Cannot check 942 * freepointer while object is allocated. 943 */ 944 return 1; 945 946 /* Check free pointer validity */ 947 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 948 object_err(s, page, p, "Freepointer corrupt"); 949 /* 950 * No choice but to zap it and thus lose the remainder 951 * of the free objects in this slab. May cause 952 * another error because the object count is now wrong. 
953 */ 954 set_freepointer(s, p, NULL); 955 return 0; 956 } 957 return 1; 958 } 959 960 static int check_slab(struct kmem_cache *s, struct page *page) 961 { 962 int maxobj; 963 964 VM_BUG_ON(!irqs_disabled()); 965 966 if (!PageSlab(page)) { 967 slab_err(s, page, "Not a valid slab page"); 968 return 0; 969 } 970 971 maxobj = order_objects(compound_order(page), s->size); 972 if (page->objects > maxobj) { 973 slab_err(s, page, "objects %u > max %u", 974 page->objects, maxobj); 975 return 0; 976 } 977 if (page->inuse > page->objects) { 978 slab_err(s, page, "inuse %u > max %u", 979 page->inuse, page->objects); 980 return 0; 981 } 982 /* Slab_pad_check fixes things up after itself */ 983 slab_pad_check(s, page); 984 return 1; 985 } 986 987 /* 988 * Determine if a certain object on a page is on the freelist. Must hold the 989 * slab lock to guarantee that the chains are in a consistent state. 990 */ 991 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 992 { 993 int nr = 0; 994 void *fp; 995 void *object = NULL; 996 int max_objects; 997 998 fp = page->freelist; 999 while (fp && nr <= page->objects) { 1000 if (fp == search) 1001 return 1; 1002 if (!check_valid_pointer(s, page, fp)) { 1003 if (object) { 1004 object_err(s, page, object, 1005 "Freechain corrupt"); 1006 set_freepointer(s, object, NULL); 1007 } else { 1008 slab_err(s, page, "Freepointer corrupt"); 1009 page->freelist = NULL; 1010 page->inuse = page->objects; 1011 slab_fix(s, "Freelist cleared"); 1012 return 0; 1013 } 1014 break; 1015 } 1016 object = fp; 1017 fp = get_freepointer(s, object); 1018 nr++; 1019 } 1020 1021 max_objects = order_objects(compound_order(page), s->size); 1022 if (max_objects > MAX_OBJS_PER_PAGE) 1023 max_objects = MAX_OBJS_PER_PAGE; 1024 1025 if (page->objects != max_objects) { 1026 slab_err(s, page, "Wrong number of objects. Found %d but should be %d", 1027 page->objects, max_objects); 1028 page->objects = max_objects; 1029 slab_fix(s, "Number of objects adjusted."); 1030 } 1031 if (page->inuse != page->objects - nr) { 1032 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d", 1033 page->inuse, page->objects - nr); 1034 page->inuse = page->objects - nr; 1035 slab_fix(s, "Object count adjusted."); 1036 } 1037 return search == NULL; 1038 } 1039 1040 static void trace(struct kmem_cache *s, struct page *page, void *object, 1041 int alloc) 1042 { 1043 if (s->flags & SLAB_TRACE) { 1044 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 1045 s->name, 1046 alloc ? "alloc" : "free", 1047 object, page->inuse, 1048 page->freelist); 1049 1050 if (!alloc) 1051 print_section(KERN_INFO, "Object ", (void *)object, 1052 s->object_size); 1053 1054 dump_stack(); 1055 } 1056 } 1057 1058 /* 1059 * Tracking of fully allocated slabs for debugging purposes. 
1060 */ 1061 static void add_full(struct kmem_cache *s, 1062 struct kmem_cache_node *n, struct page *page) 1063 { 1064 if (!(s->flags & SLAB_STORE_USER)) 1065 return; 1066 1067 lockdep_assert_held(&n->list_lock); 1068 list_add(&page->slab_list, &n->full); 1069 } 1070 1071 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) 1072 { 1073 if (!(s->flags & SLAB_STORE_USER)) 1074 return; 1075 1076 lockdep_assert_held(&n->list_lock); 1077 list_del(&page->slab_list); 1078 } 1079 1080 /* Tracking of the number of slabs for debugging purposes */ 1081 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1082 { 1083 struct kmem_cache_node *n = get_node(s, node); 1084 1085 return atomic_long_read(&n->nr_slabs); 1086 } 1087 1088 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1089 { 1090 return atomic_long_read(&n->nr_slabs); 1091 } 1092 1093 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 1094 { 1095 struct kmem_cache_node *n = get_node(s, node); 1096 1097 /* 1098 * May be called early in order to allocate a slab for the 1099 * kmem_cache_node structure. Solve the chicken-egg 1100 * dilemma by deferring the increment of the count during 1101 * bootstrap (see early_kmem_cache_node_alloc). 1102 */ 1103 if (likely(n)) { 1104 atomic_long_inc(&n->nr_slabs); 1105 atomic_long_add(objects, &n->total_objects); 1106 } 1107 } 1108 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 1109 { 1110 struct kmem_cache_node *n = get_node(s, node); 1111 1112 atomic_long_dec(&n->nr_slabs); 1113 atomic_long_sub(objects, &n->total_objects); 1114 } 1115 1116 /* Object debug checks for alloc/free paths */ 1117 static void setup_object_debug(struct kmem_cache *s, struct page *page, 1118 void *object) 1119 { 1120 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) 1121 return; 1122 1123 init_object(s, object, SLUB_RED_INACTIVE); 1124 init_tracking(s, object); 1125 } 1126 1127 static 1128 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) 1129 { 1130 if (!kmem_cache_debug_flags(s, SLAB_POISON)) 1131 return; 1132 1133 metadata_access_enable(); 1134 memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page)); 1135 metadata_access_disable(); 1136 } 1137 1138 static inline int alloc_consistency_checks(struct kmem_cache *s, 1139 struct page *page, void *object) 1140 { 1141 if (!check_slab(s, page)) 1142 return 0; 1143 1144 if (!check_valid_pointer(s, page, object)) { 1145 object_err(s, page, object, "Freelist Pointer check fails"); 1146 return 0; 1147 } 1148 1149 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) 1150 return 0; 1151 1152 return 1; 1153 } 1154 1155 static noinline int alloc_debug_processing(struct kmem_cache *s, 1156 struct page *page, 1157 void *object, unsigned long addr) 1158 { 1159 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1160 if (!alloc_consistency_checks(s, page, object)) 1161 goto bad; 1162 } 1163 1164 /* Success perform special debug activities for allocs */ 1165 if (s->flags & SLAB_STORE_USER) 1166 set_track(s, object, TRACK_ALLOC, addr); 1167 trace(s, page, object, 1); 1168 init_object(s, object, SLUB_RED_ACTIVE); 1169 return 1; 1170 1171 bad: 1172 if (PageSlab(page)) { 1173 /* 1174 * If this is a slab page then lets do the best we can 1175 * to avoid issues in the future. Marking all objects 1176 * as used avoids touching the remaining objects. 
1177 */ 1178 slab_fix(s, "Marking all objects used"); 1179 page->inuse = page->objects; 1180 page->freelist = NULL; 1181 } 1182 return 0; 1183 } 1184 1185 static inline int free_consistency_checks(struct kmem_cache *s, 1186 struct page *page, void *object, unsigned long addr) 1187 { 1188 if (!check_valid_pointer(s, page, object)) { 1189 slab_err(s, page, "Invalid object pointer 0x%p", object); 1190 return 0; 1191 } 1192 1193 if (on_freelist(s, page, object)) { 1194 object_err(s, page, object, "Object already free"); 1195 return 0; 1196 } 1197 1198 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) 1199 return 0; 1200 1201 if (unlikely(s != page->slab_cache)) { 1202 if (!PageSlab(page)) { 1203 slab_err(s, page, "Attempt to free object(0x%p) outside of slab", 1204 object); 1205 } else if (!page->slab_cache) { 1206 pr_err("SLUB <none>: no slab for object 0x%p.\n", 1207 object); 1208 dump_stack(); 1209 } else 1210 object_err(s, page, object, 1211 "page slab pointer corrupt."); 1212 return 0; 1213 } 1214 return 1; 1215 } 1216 1217 /* Supports checking bulk free of a constructed freelist */ 1218 static noinline int free_debug_processing( 1219 struct kmem_cache *s, struct page *page, 1220 void *head, void *tail, int bulk_cnt, 1221 unsigned long addr) 1222 { 1223 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1224 void *object = head; 1225 int cnt = 0; 1226 unsigned long flags; 1227 int ret = 0; 1228 1229 spin_lock_irqsave(&n->list_lock, flags); 1230 slab_lock(page); 1231 1232 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1233 if (!check_slab(s, page)) 1234 goto out; 1235 } 1236 1237 next_object: 1238 cnt++; 1239 1240 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1241 if (!free_consistency_checks(s, page, object, addr)) 1242 goto out; 1243 } 1244 1245 if (s->flags & SLAB_STORE_USER) 1246 set_track(s, object, TRACK_FREE, addr); 1247 trace(s, page, object, 0); 1248 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 1249 init_object(s, object, SLUB_RED_INACTIVE); 1250 1251 /* Reached end of constructed freelist yet? */ 1252 if (object != tail) { 1253 object = get_freepointer(s, object); 1254 goto next_object; 1255 } 1256 ret = 1; 1257 1258 out: 1259 if (cnt != bulk_cnt) 1260 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", 1261 bulk_cnt, cnt); 1262 1263 slab_unlock(page); 1264 spin_unlock_irqrestore(&n->list_lock, flags); 1265 if (!ret) 1266 slab_fix(s, "Object at 0x%p not freed", object); 1267 return ret; 1268 } 1269 1270 /* 1271 * Parse a block of slub_debug options. Blocks are delimited by ';' 1272 * 1273 * @str: start of block 1274 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified 1275 * @slabs: return start of list of slabs, or NULL when there's no list 1276 * @init: assume this is initial parsing and not per-kmem-create parsing 1277 * 1278 * returns the start of next block if there's any, or NULL 1279 */ 1280 static char * 1281 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init) 1282 { 1283 bool higher_order_disable = false; 1284 1285 /* Skip any completely empty blocks */ 1286 while (*str && *str == ';') 1287 str++; 1288 1289 if (*str == ',') { 1290 /* 1291 * No options but restriction on slabs. This means full 1292 * debugging for slabs matching a pattern. 
1293 */ 1294 *flags = DEBUG_DEFAULT_FLAGS; 1295 goto check_slabs; 1296 } 1297 *flags = 0; 1298 1299 /* Determine which debug features should be switched on */ 1300 for (; *str && *str != ',' && *str != ';'; str++) { 1301 switch (tolower(*str)) { 1302 case '-': 1303 *flags = 0; 1304 break; 1305 case 'f': 1306 *flags |= SLAB_CONSISTENCY_CHECKS; 1307 break; 1308 case 'z': 1309 *flags |= SLAB_RED_ZONE; 1310 break; 1311 case 'p': 1312 *flags |= SLAB_POISON; 1313 break; 1314 case 'u': 1315 *flags |= SLAB_STORE_USER; 1316 break; 1317 case 't': 1318 *flags |= SLAB_TRACE; 1319 break; 1320 case 'a': 1321 *flags |= SLAB_FAILSLAB; 1322 break; 1323 case 'o': 1324 /* 1325 * Avoid enabling debugging on caches if its minimum 1326 * order would increase as a result. 1327 */ 1328 higher_order_disable = true; 1329 break; 1330 default: 1331 if (init) 1332 pr_err("slub_debug option '%c' unknown. skipped\n", *str); 1333 } 1334 } 1335 check_slabs: 1336 if (*str == ',') 1337 *slabs = ++str; 1338 else 1339 *slabs = NULL; 1340 1341 /* Skip over the slab list */ 1342 while (*str && *str != ';') 1343 str++; 1344 1345 /* Skip any completely empty blocks */ 1346 while (*str && *str == ';') 1347 str++; 1348 1349 if (init && higher_order_disable) 1350 disable_higher_order_debug = 1; 1351 1352 if (*str) 1353 return str; 1354 else 1355 return NULL; 1356 } 1357 1358 static int __init setup_slub_debug(char *str) 1359 { 1360 slab_flags_t flags; 1361 char *saved_str; 1362 char *slab_list; 1363 bool global_slub_debug_changed = false; 1364 bool slab_list_specified = false; 1365 1366 slub_debug = DEBUG_DEFAULT_FLAGS; 1367 if (*str++ != '=' || !*str) 1368 /* 1369 * No options specified. Switch on full debugging. 1370 */ 1371 goto out; 1372 1373 saved_str = str; 1374 while (str) { 1375 str = parse_slub_debug_flags(str, &flags, &slab_list, true); 1376 1377 if (!slab_list) { 1378 slub_debug = flags; 1379 global_slub_debug_changed = true; 1380 } else { 1381 slab_list_specified = true; 1382 } 1383 } 1384 1385 /* 1386 * For backwards compatibility, a single list of flags with list of 1387 * slabs means debugging is only enabled for those slabs, so the global 1388 * slub_debug should be 0. We can extended that to multiple lists as 1389 * long as there is no option specifying flags without a slab list. 1390 */ 1391 if (slab_list_specified) { 1392 if (!global_slub_debug_changed) 1393 slub_debug = 0; 1394 slub_debug_string = saved_str; 1395 } 1396 out: 1397 if (slub_debug != 0 || slub_debug_string) 1398 static_branch_enable(&slub_debug_enabled); 1399 if ((static_branch_unlikely(&init_on_alloc) || 1400 static_branch_unlikely(&init_on_free)) && 1401 (slub_debug & SLAB_POISON)) 1402 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); 1403 return 1; 1404 } 1405 1406 __setup("slub_debug", setup_slub_debug); 1407 1408 /* 1409 * kmem_cache_flags - apply debugging options to the cache 1410 * @object_size: the size of an object without meta data 1411 * @flags: flags to set 1412 * @name: name of the cache 1413 * 1414 * Debug option(s) are applied to @flags. In addition to the debug 1415 * option(s), if a slab name (or multiple) is specified i.e. 1416 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ... 1417 * then only the select slabs will receive the debug option(s). 
1418 */ 1419 slab_flags_t kmem_cache_flags(unsigned int object_size, 1420 slab_flags_t flags, const char *name) 1421 { 1422 char *iter; 1423 size_t len; 1424 char *next_block; 1425 slab_flags_t block_flags; 1426 slab_flags_t slub_debug_local = slub_debug; 1427 1428 /* 1429 * If the slab cache is for debugging (e.g. kmemleak) then 1430 * don't store user (stack trace) information by default, 1431 * but let the user enable it via the command line below. 1432 */ 1433 if (flags & SLAB_NOLEAKTRACE) 1434 slub_debug_local &= ~SLAB_STORE_USER; 1435 1436 len = strlen(name); 1437 next_block = slub_debug_string; 1438 /* Go through all blocks of debug options, see if any matches our slab's name */ 1439 while (next_block) { 1440 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); 1441 if (!iter) 1442 continue; 1443 /* Found a block that has a slab list, search it */ 1444 while (*iter) { 1445 char *end, *glob; 1446 size_t cmplen; 1447 1448 end = strchrnul(iter, ','); 1449 if (next_block && next_block < end) 1450 end = next_block - 1; 1451 1452 glob = strnchr(iter, end - iter, '*'); 1453 if (glob) 1454 cmplen = glob - iter; 1455 else 1456 cmplen = max_t(size_t, len, (end - iter)); 1457 1458 if (!strncmp(name, iter, cmplen)) { 1459 flags |= block_flags; 1460 return flags; 1461 } 1462 1463 if (!*end || *end == ';') 1464 break; 1465 iter = end + 1; 1466 } 1467 } 1468 1469 return flags | slub_debug_local; 1470 } 1471 #else /* !CONFIG_SLUB_DEBUG */ 1472 static inline void setup_object_debug(struct kmem_cache *s, 1473 struct page *page, void *object) {} 1474 static inline 1475 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {} 1476 1477 static inline int alloc_debug_processing(struct kmem_cache *s, 1478 struct page *page, void *object, unsigned long addr) { return 0; } 1479 1480 static inline int free_debug_processing( 1481 struct kmem_cache *s, struct page *page, 1482 void *head, void *tail, int bulk_cnt, 1483 unsigned long addr) { return 0; } 1484 1485 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1486 { return 1; } 1487 static inline int check_object(struct kmem_cache *s, struct page *page, 1488 void *object, u8 val) { return 1; } 1489 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1490 struct page *page) {} 1491 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 1492 struct page *page) {} 1493 slab_flags_t kmem_cache_flags(unsigned int object_size, 1494 slab_flags_t flags, const char *name) 1495 { 1496 return flags; 1497 } 1498 #define slub_debug 0 1499 1500 #define disable_higher_order_debug 0 1501 1502 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1503 { return 0; } 1504 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1505 { return 0; } 1506 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1507 int objects) {} 1508 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1509 int objects) {} 1510 1511 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, 1512 void **freelist, void *nextfree) 1513 { 1514 return false; 1515 } 1516 #endif /* CONFIG_SLUB_DEBUG */ 1517 1518 /* 1519 * Hooks for other subsystems that check memory allocations. In a typical 1520 * production configuration these hooks all should produce no code at all. 
1521 */ 1522 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) 1523 { 1524 ptr = kasan_kmalloc_large(ptr, size, flags); 1525 /* As ptr might get tagged, call kmemleak hook after KASAN. */ 1526 kmemleak_alloc(ptr, size, 1, flags); 1527 return ptr; 1528 } 1529 1530 static __always_inline void kfree_hook(void *x) 1531 { 1532 kmemleak_free(x); 1533 kasan_kfree_large(x); 1534 } 1535 1536 static __always_inline bool slab_free_hook(struct kmem_cache *s, 1537 void *x, bool init) 1538 { 1539 kmemleak_free_recursive(x, s->flags); 1540 1541 /* 1542 * Trouble is that we may no longer disable interrupts in the fast path 1543 * So in order to make the debug calls that expect irqs to be 1544 * disabled we need to disable interrupts temporarily. 1545 */ 1546 #ifdef CONFIG_LOCKDEP 1547 { 1548 unsigned long flags; 1549 1550 local_irq_save(flags); 1551 debug_check_no_locks_freed(x, s->object_size); 1552 local_irq_restore(flags); 1553 } 1554 #endif 1555 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1556 debug_check_no_obj_freed(x, s->object_size); 1557 1558 /* Use KCSAN to help debug racy use-after-free. */ 1559 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) 1560 __kcsan_check_access(x, s->object_size, 1561 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 1562 1563 /* 1564 * As memory initialization might be integrated into KASAN, 1565 * kasan_slab_free and initialization memset's must be 1566 * kept together to avoid discrepancies in behavior. 1567 * 1568 * The initialization memset's clear the object and the metadata, 1569 * but don't touch the SLAB redzone. 1570 */ 1571 if (init) { 1572 int rsize; 1573 1574 if (!kasan_has_integrated_init()) 1575 memset(kasan_reset_tag(x), 0, s->object_size); 1576 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 1577 memset((char *)kasan_reset_tag(x) + s->inuse, 0, 1578 s->size - s->inuse - rsize); 1579 } 1580 /* KASAN might put x into memory quarantine, delaying its reuse. */ 1581 return kasan_slab_free(s, x, init); 1582 } 1583 1584 static inline bool slab_free_freelist_hook(struct kmem_cache *s, 1585 void **head, void **tail) 1586 { 1587 1588 void *object; 1589 void *next = *head; 1590 void *old_tail = *tail ? 
*tail : *head; 1591 1592 if (is_kfence_address(next)) { 1593 slab_free_hook(s, next, false); 1594 return true; 1595 } 1596 1597 /* Head and tail of the reconstructed freelist */ 1598 *head = NULL; 1599 *tail = NULL; 1600 1601 do { 1602 object = next; 1603 next = get_freepointer(s, object); 1604 1605 /* If object's reuse doesn't have to be delayed */ 1606 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) { 1607 /* Move object to the new freelist */ 1608 set_freepointer(s, object, *head); 1609 *head = object; 1610 if (!*tail) 1611 *tail = object; 1612 } 1613 } while (object != old_tail); 1614 1615 if (*head == *tail) 1616 *tail = NULL; 1617 1618 return *head != NULL; 1619 } 1620 1621 static void *setup_object(struct kmem_cache *s, struct page *page, 1622 void *object) 1623 { 1624 setup_object_debug(s, page, object); 1625 object = kasan_init_slab_obj(s, object); 1626 if (unlikely(s->ctor)) { 1627 kasan_unpoison_object_data(s, object); 1628 s->ctor(object); 1629 kasan_poison_object_data(s, object); 1630 } 1631 return object; 1632 } 1633 1634 /* 1635 * Slab allocation and freeing 1636 */ 1637 static inline struct page *alloc_slab_page(struct kmem_cache *s, 1638 gfp_t flags, int node, struct kmem_cache_order_objects oo) 1639 { 1640 struct page *page; 1641 unsigned int order = oo_order(oo); 1642 1643 if (node == NUMA_NO_NODE) 1644 page = alloc_pages(flags, order); 1645 else 1646 page = __alloc_pages_node(node, flags, order); 1647 1648 return page; 1649 } 1650 1651 #ifdef CONFIG_SLAB_FREELIST_RANDOM 1652 /* Pre-initialize the random sequence cache */ 1653 static int init_cache_random_seq(struct kmem_cache *s) 1654 { 1655 unsigned int count = oo_objects(s->oo); 1656 int err; 1657 1658 /* Bailout if already initialised */ 1659 if (s->random_seq) 1660 return 0; 1661 1662 err = cache_random_seq_create(s, count, GFP_KERNEL); 1663 if (err) { 1664 pr_err("SLUB: Unable to initialize free list for %s\n", 1665 s->name); 1666 return err; 1667 } 1668 1669 /* Transform to an offset on the set of pages */ 1670 if (s->random_seq) { 1671 unsigned int i; 1672 1673 for (i = 0; i < count; i++) 1674 s->random_seq[i] *= s->size; 1675 } 1676 return 0; 1677 } 1678 1679 /* Initialize each random sequence freelist per cache */ 1680 static void __init init_freelist_randomization(void) 1681 { 1682 struct kmem_cache *s; 1683 1684 mutex_lock(&slab_mutex); 1685 1686 list_for_each_entry(s, &slab_caches, list) 1687 init_cache_random_seq(s); 1688 1689 mutex_unlock(&slab_mutex); 1690 } 1691 1692 /* Get the next entry on the pre-computed freelist randomized */ 1693 static void *next_freelist_entry(struct kmem_cache *s, struct page *page, 1694 unsigned long *pos, void *start, 1695 unsigned long page_limit, 1696 unsigned long freelist_count) 1697 { 1698 unsigned int idx; 1699 1700 /* 1701 * If the target page allocation failed, the number of objects on the 1702 * page might be smaller than the usual size defined by the cache. 
1703 */ 1704 do { 1705 idx = s->random_seq[*pos]; 1706 *pos += 1; 1707 if (*pos >= freelist_count) 1708 *pos = 0; 1709 } while (unlikely(idx >= page_limit)); 1710 1711 return (char *)start + idx; 1712 } 1713 1714 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 1715 static bool shuffle_freelist(struct kmem_cache *s, struct page *page) 1716 { 1717 void *start; 1718 void *cur; 1719 void *next; 1720 unsigned long idx, pos, page_limit, freelist_count; 1721 1722 if (page->objects < 2 || !s->random_seq) 1723 return false; 1724 1725 freelist_count = oo_objects(s->oo); 1726 pos = get_random_int() % freelist_count; 1727 1728 page_limit = page->objects * s->size; 1729 start = fixup_red_left(s, page_address(page)); 1730 1731 /* First entry is used as the base of the freelist */ 1732 cur = next_freelist_entry(s, page, &pos, start, page_limit, 1733 freelist_count); 1734 cur = setup_object(s, page, cur); 1735 page->freelist = cur; 1736 1737 for (idx = 1; idx < page->objects; idx++) { 1738 next = next_freelist_entry(s, page, &pos, start, page_limit, 1739 freelist_count); 1740 next = setup_object(s, page, next); 1741 set_freepointer(s, cur, next); 1742 cur = next; 1743 } 1744 set_freepointer(s, cur, NULL); 1745 1746 return true; 1747 } 1748 #else 1749 static inline int init_cache_random_seq(struct kmem_cache *s) 1750 { 1751 return 0; 1752 } 1753 static inline void init_freelist_randomization(void) { } 1754 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) 1755 { 1756 return false; 1757 } 1758 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 1759 1760 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1761 { 1762 struct page *page; 1763 struct kmem_cache_order_objects oo = s->oo; 1764 gfp_t alloc_gfp; 1765 void *start, *p, *next; 1766 int idx; 1767 bool shuffle; 1768 1769 flags &= gfp_allowed_mask; 1770 1771 if (gfpflags_allow_blocking(flags)) 1772 local_irq_enable(); 1773 1774 flags |= s->allocflags; 1775 1776 /* 1777 * Let the initial higher-order allocation fail under memory pressure 1778 * so we fall-back to the minimum order allocation. 1779 */ 1780 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 1781 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 1782 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL); 1783 1784 page = alloc_slab_page(s, alloc_gfp, node, oo); 1785 if (unlikely(!page)) { 1786 oo = s->min; 1787 alloc_gfp = flags; 1788 /* 1789 * Allocation may have failed due to fragmentation. 
1790 * Try a lower order alloc if possible 1791 */ 1792 page = alloc_slab_page(s, alloc_gfp, node, oo); 1793 if (unlikely(!page)) 1794 goto out; 1795 stat(s, ORDER_FALLBACK); 1796 } 1797 1798 page->objects = oo_objects(oo); 1799 1800 account_slab_page(page, oo_order(oo), s, flags); 1801 1802 page->slab_cache = s; 1803 __SetPageSlab(page); 1804 if (page_is_pfmemalloc(page)) 1805 SetPageSlabPfmemalloc(page); 1806 1807 kasan_poison_slab(page); 1808 1809 start = page_address(page); 1810 1811 setup_page_debug(s, page, start); 1812 1813 shuffle = shuffle_freelist(s, page); 1814 1815 if (!shuffle) { 1816 start = fixup_red_left(s, start); 1817 start = setup_object(s, page, start); 1818 page->freelist = start; 1819 for (idx = 0, p = start; idx < page->objects - 1; idx++) { 1820 next = p + s->size; 1821 next = setup_object(s, page, next); 1822 set_freepointer(s, p, next); 1823 p = next; 1824 } 1825 set_freepointer(s, p, NULL); 1826 } 1827 1828 page->inuse = page->objects; 1829 page->frozen = 1; 1830 1831 out: 1832 if (gfpflags_allow_blocking(flags)) 1833 local_irq_disable(); 1834 if (!page) 1835 return NULL; 1836 1837 inc_slabs_node(s, page_to_nid(page), page->objects); 1838 1839 return page; 1840 } 1841 1842 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1843 { 1844 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 1845 flags = kmalloc_fix_flags(flags); 1846 1847 return allocate_slab(s, 1848 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1849 } 1850 1851 static void __free_slab(struct kmem_cache *s, struct page *page) 1852 { 1853 int order = compound_order(page); 1854 int pages = 1 << order; 1855 1856 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 1857 void *p; 1858 1859 slab_pad_check(s, page); 1860 for_each_object(p, s, page_address(page), 1861 page->objects) 1862 check_object(s, page, p, SLUB_RED_INACTIVE); 1863 } 1864 1865 __ClearPageSlabPfmemalloc(page); 1866 __ClearPageSlab(page); 1867 /* In union with page->mapping where page allocator expects NULL */ 1868 page->slab_cache = NULL; 1869 if (current->reclaim_state) 1870 current->reclaim_state->reclaimed_slab += pages; 1871 unaccount_slab_page(page, order, s); 1872 __free_pages(page, order); 1873 } 1874 1875 static void rcu_free_slab(struct rcu_head *h) 1876 { 1877 struct page *page = container_of(h, struct page, rcu_head); 1878 1879 __free_slab(page->slab_cache, page); 1880 } 1881 1882 static void free_slab(struct kmem_cache *s, struct page *page) 1883 { 1884 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { 1885 call_rcu(&page->rcu_head, rcu_free_slab); 1886 } else 1887 __free_slab(s, page); 1888 } 1889 1890 static void discard_slab(struct kmem_cache *s, struct page *page) 1891 { 1892 dec_slabs_node(s, page_to_nid(page), page->objects); 1893 free_slab(s, page); 1894 } 1895 1896 /* 1897 * Management of partially allocated slabs. 
1898 */ 1899 static inline void 1900 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) 1901 { 1902 n->nr_partial++; 1903 if (tail == DEACTIVATE_TO_TAIL) 1904 list_add_tail(&page->slab_list, &n->partial); 1905 else 1906 list_add(&page->slab_list, &n->partial); 1907 } 1908 1909 static inline void add_partial(struct kmem_cache_node *n, 1910 struct page *page, int tail) 1911 { 1912 lockdep_assert_held(&n->list_lock); 1913 __add_partial(n, page, tail); 1914 } 1915 1916 static inline void remove_partial(struct kmem_cache_node *n, 1917 struct page *page) 1918 { 1919 lockdep_assert_held(&n->list_lock); 1920 list_del(&page->slab_list); 1921 n->nr_partial--; 1922 } 1923 1924 /* 1925 * Remove slab from the partial list, freeze it and 1926 * return the pointer to the freelist. 1927 * 1928 * Returns a list of objects or NULL if it fails. 1929 */ 1930 static inline void *acquire_slab(struct kmem_cache *s, 1931 struct kmem_cache_node *n, struct page *page, 1932 int mode, int *objects) 1933 { 1934 void *freelist; 1935 unsigned long counters; 1936 struct page new; 1937 1938 lockdep_assert_held(&n->list_lock); 1939 1940 /* 1941 * Zap the freelist and set the frozen bit. 1942 * The old freelist is the list of objects for the 1943 * per cpu allocation list. 1944 */ 1945 freelist = page->freelist; 1946 counters = page->counters; 1947 new.counters = counters; 1948 *objects = new.objects - new.inuse; 1949 if (mode) { 1950 new.inuse = page->objects; 1951 new.freelist = NULL; 1952 } else { 1953 new.freelist = freelist; 1954 } 1955 1956 VM_BUG_ON(new.frozen); 1957 new.frozen = 1; 1958 1959 if (!__cmpxchg_double_slab(s, page, 1960 freelist, counters, 1961 new.freelist, new.counters, 1962 "acquire_slab")) 1963 return NULL; 1964 1965 remove_partial(n, page); 1966 WARN_ON(!freelist); 1967 return freelist; 1968 } 1969 1970 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); 1971 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags); 1972 1973 /* 1974 * Try to allocate a partial slab from a specific node. 1975 */ 1976 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, 1977 struct kmem_cache_cpu *c, gfp_t flags) 1978 { 1979 struct page *page, *page2; 1980 void *object = NULL; 1981 unsigned int available = 0; 1982 int objects; 1983 1984 /* 1985 * Racy check. If we mistakenly see no partial slabs then we 1986 * just allocate an empty slab. If we mistakenly try to get a 1987 * partial slab and there is none available then get_partial() 1988 * will return NULL. 1989 */ 1990 if (!n || !n->nr_partial) 1991 return NULL; 1992 1993 spin_lock(&n->list_lock); 1994 list_for_each_entry_safe(page, page2, &n->partial, slab_list) { 1995 void *t; 1996 1997 if (!pfmemalloc_match(page, flags)) 1998 continue; 1999 2000 t = acquire_slab(s, n, page, object == NULL, &objects); 2001 if (!t) 2002 break; 2003 2004 available += objects; 2005 if (!object) { 2006 c->page = page; 2007 stat(s, ALLOC_FROM_PARTIAL); 2008 object = t; 2009 } else { 2010 put_cpu_partial(s, page, 0); 2011 stat(s, CPU_PARTIAL_NODE); 2012 } 2013 if (!kmem_cache_has_cpu_partial(s) 2014 || available > slub_cpu_partial(s) / 2) 2015 break; 2016 2017 } 2018 spin_unlock(&n->list_lock); 2019 return object; 2020 } 2021 2022 /* 2023 * Get a page from somewhere. Search in increasing NUMA distances. 
2024 */ 2025 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, 2026 struct kmem_cache_cpu *c) 2027 { 2028 #ifdef CONFIG_NUMA 2029 struct zonelist *zonelist; 2030 struct zoneref *z; 2031 struct zone *zone; 2032 enum zone_type highest_zoneidx = gfp_zone(flags); 2033 void *object; 2034 unsigned int cpuset_mems_cookie; 2035 2036 /* 2037 * The defrag ratio allows a configuration of the tradeoffs between 2038 * inter node defragmentation and node local allocations. A lower 2039 * defrag_ratio increases the tendency to do local allocations 2040 * instead of attempting to obtain partial slabs from other nodes. 2041 * 2042 * If the defrag_ratio is set to 0 then kmalloc() always 2043 * returns node local objects. If the ratio is higher then kmalloc() 2044 * may return off node objects because partial slabs are obtained 2045 * from other nodes and filled up. 2046 * 2047 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2048 * (which makes defrag_ratio = 1000) then every (well almost) 2049 * allocation will first attempt to defrag slab caches on other nodes. 2050 * This means scanning over all nodes to look for partial slabs which 2051 * may be expensive if we do it every time we are trying to find a slab 2052 * with available objects. 2053 */ 2054 if (!s->remote_node_defrag_ratio || 2055 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2056 return NULL; 2057 2058 do { 2059 cpuset_mems_cookie = read_mems_allowed_begin(); 2060 zonelist = node_zonelist(mempolicy_slab_node(), flags); 2061 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2062 struct kmem_cache_node *n; 2063 2064 n = get_node(s, zone_to_nid(zone)); 2065 2066 if (n && cpuset_zone_allowed(zone, flags) && 2067 n->nr_partial > s->min_partial) { 2068 object = get_partial_node(s, n, c, flags); 2069 if (object) { 2070 /* 2071 * Don't check read_mems_allowed_retry() 2072 * here - if mems_allowed was updated in 2073 * parallel, that was a harmless race 2074 * between allocation and the cpuset 2075 * update 2076 */ 2077 return object; 2078 } 2079 } 2080 } 2081 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2082 #endif /* CONFIG_NUMA */ 2083 return NULL; 2084 } 2085 2086 /* 2087 * Get a partial page, lock it and return it. 2088 */ 2089 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, 2090 struct kmem_cache_cpu *c) 2091 { 2092 void *object; 2093 int searchnode = node; 2094 2095 if (node == NUMA_NO_NODE) 2096 searchnode = numa_mem_id(); 2097 2098 object = get_partial_node(s, get_node(s, searchnode), c, flags); 2099 if (object || node != NUMA_NO_NODE) 2100 return object; 2101 2102 return get_any_partial(s, flags, c); 2103 } 2104 2105 #ifdef CONFIG_PREEMPTION 2106 /* 2107 * Calculate the next globally unique transaction for disambiguation 2108 * during cmpxchg. The transactions start with the cpu number and are then 2109 * incremented by CONFIG_NR_CPUS. 2110 */ 2111 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2112 #else 2113 /* 2114 * No preemption supported therefore also no need to check for 2115 * different cpus. 
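 *
 * With a step of 1 the tid degenerates into a plain per cpu operation
 * counter. An operation that intervenes on the same cpu (e.g. from an
 * interrupt handler) still bumps the tid and therefore still makes the
 * fastpath cmpxchg fail and retry.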
2116 */ 2117 #define TID_STEP 1 2118 #endif 2119 2120 static inline unsigned long next_tid(unsigned long tid) 2121 { 2122 return tid + TID_STEP; 2123 } 2124 2125 #ifdef SLUB_DEBUG_CMPXCHG 2126 static inline unsigned int tid_to_cpu(unsigned long tid) 2127 { 2128 return tid % TID_STEP; 2129 } 2130 2131 static inline unsigned long tid_to_event(unsigned long tid) 2132 { 2133 return tid / TID_STEP; 2134 } 2135 #endif 2136 2137 static inline unsigned int init_tid(int cpu) 2138 { 2139 return cpu; 2140 } 2141 2142 static inline void note_cmpxchg_failure(const char *n, 2143 const struct kmem_cache *s, unsigned long tid) 2144 { 2145 #ifdef SLUB_DEBUG_CMPXCHG 2146 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2147 2148 pr_info("%s %s: cmpxchg redo ", n, s->name); 2149 2150 #ifdef CONFIG_PREEMPTION 2151 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2152 pr_warn("due to cpu change %d -> %d\n", 2153 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2154 else 2155 #endif 2156 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2157 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2158 tid_to_event(tid), tid_to_event(actual_tid)); 2159 else 2160 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2161 actual_tid, tid, next_tid(tid)); 2162 #endif 2163 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2164 } 2165 2166 static void init_kmem_cache_cpus(struct kmem_cache *s) 2167 { 2168 int cpu; 2169 2170 for_each_possible_cpu(cpu) 2171 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); 2172 } 2173 2174 /* 2175 * Remove the cpu slab 2176 */ 2177 static void deactivate_slab(struct kmem_cache *s, struct page *page, 2178 void *freelist, struct kmem_cache_cpu *c) 2179 { 2180 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; 2181 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 2182 int lock = 0, free_delta = 0; 2183 enum slab_modes l = M_NONE, m = M_NONE; 2184 void *nextfree, *freelist_iter, *freelist_tail; 2185 int tail = DEACTIVATE_TO_HEAD; 2186 struct page new; 2187 struct page old; 2188 2189 if (page->freelist) { 2190 stat(s, DEACTIVATE_REMOTE_FREES); 2191 tail = DEACTIVATE_TO_TAIL; 2192 } 2193 2194 /* 2195 * Stage one: Count the objects on cpu's freelist as free_delta and 2196 * remember the last object in freelist_tail for later splicing. 2197 */ 2198 freelist_tail = NULL; 2199 freelist_iter = freelist; 2200 while (freelist_iter) { 2201 nextfree = get_freepointer(s, freelist_iter); 2202 2203 /* 2204 * If 'nextfree' is invalid, it is possible that the object at 2205 * 'freelist_iter' is already corrupted. So isolate all objects 2206 * starting at 'freelist_iter' by skipping them. 2207 */ 2208 if (freelist_corrupted(s, page, &freelist_iter, nextfree)) 2209 break; 2210 2211 freelist_tail = freelist_iter; 2212 free_delta++; 2213 2214 freelist_iter = nextfree; 2215 } 2216 2217 /* 2218 * Stage two: Unfreeze the page while splicing the per-cpu 2219 * freelist to the head of page's freelist. 2220 * 2221 * Ensure that the page is unfrozen while the list presence 2222 * reflects the actual number of objects during unfreeze. 2223 * 2224 * We setup the list membership and then perform a cmpxchg 2225 * with the count. If there is a mismatch then the page 2226 * is not unfrozen but the page is on the wrong list. 2227 * 2228 * Then we restart the process which may have to remove 2229 * the page from the list that we just put it on again 2230 * because the number of objects in the slab may have 2231 * changed. 
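 *
 * The target states are: M_FREE if the slab became empty and the node
 * already holds at least min_partial slabs (otherwise an empty slab stays
 * on the partial list), M_PARTIAL if free objects remain, and M_FULL
 * otherwise. The full list itself is only maintained when SLAB_STORE_USER
 * debugging is active.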
2232 */ 2233 redo: 2234 2235 old.freelist = READ_ONCE(page->freelist); 2236 old.counters = READ_ONCE(page->counters); 2237 VM_BUG_ON(!old.frozen); 2238 2239 /* Determine target state of the slab */ 2240 new.counters = old.counters; 2241 if (freelist_tail) { 2242 new.inuse -= free_delta; 2243 set_freepointer(s, freelist_tail, old.freelist); 2244 new.freelist = freelist; 2245 } else 2246 new.freelist = old.freelist; 2247 2248 new.frozen = 0; 2249 2250 if (!new.inuse && n->nr_partial >= s->min_partial) 2251 m = M_FREE; 2252 else if (new.freelist) { 2253 m = M_PARTIAL; 2254 if (!lock) { 2255 lock = 1; 2256 /* 2257 * Taking the spinlock removes the possibility 2258 * that acquire_slab() will see a slab page that 2259 * is frozen 2260 */ 2261 spin_lock(&n->list_lock); 2262 } 2263 } else { 2264 m = M_FULL; 2265 if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) { 2266 lock = 1; 2267 /* 2268 * This also ensures that the scanning of full 2269 * slabs from diagnostic functions will not see 2270 * any frozen slabs. 2271 */ 2272 spin_lock(&n->list_lock); 2273 } 2274 } 2275 2276 if (l != m) { 2277 if (l == M_PARTIAL) 2278 remove_partial(n, page); 2279 else if (l == M_FULL) 2280 remove_full(s, n, page); 2281 2282 if (m == M_PARTIAL) 2283 add_partial(n, page, tail); 2284 else if (m == M_FULL) 2285 add_full(s, n, page); 2286 } 2287 2288 l = m; 2289 if (!__cmpxchg_double_slab(s, page, 2290 old.freelist, old.counters, 2291 new.freelist, new.counters, 2292 "unfreezing slab")) 2293 goto redo; 2294 2295 if (lock) 2296 spin_unlock(&n->list_lock); 2297 2298 if (m == M_PARTIAL) 2299 stat(s, tail); 2300 else if (m == M_FULL) 2301 stat(s, DEACTIVATE_FULL); 2302 else if (m == M_FREE) { 2303 stat(s, DEACTIVATE_EMPTY); 2304 discard_slab(s, page); 2305 stat(s, FREE_SLAB); 2306 } 2307 2308 c->page = NULL; 2309 c->freelist = NULL; 2310 } 2311 2312 /* 2313 * Unfreeze all the cpu partial slabs. 2314 * 2315 * This function must be called with interrupts disabled 2316 * for the cpu using c (or some other guarantee must be there 2317 * to guarantee no concurrent accesses). 
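 *
 * Every page on the per cpu partial list still has page->frozen set. The
 * loop below clears the frozen bit with a cmpxchg and then either queues
 * the page on the node's partial list or, if it is empty and the node
 * already holds at least min_partial slabs, discards it.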
2318 */ 2319 static void unfreeze_partials(struct kmem_cache *s, 2320 struct kmem_cache_cpu *c) 2321 { 2322 #ifdef CONFIG_SLUB_CPU_PARTIAL 2323 struct kmem_cache_node *n = NULL, *n2 = NULL; 2324 struct page *page, *discard_page = NULL; 2325 2326 while ((page = slub_percpu_partial(c))) { 2327 struct page new; 2328 struct page old; 2329 2330 slub_set_percpu_partial(c, page); 2331 2332 n2 = get_node(s, page_to_nid(page)); 2333 if (n != n2) { 2334 if (n) 2335 spin_unlock(&n->list_lock); 2336 2337 n = n2; 2338 spin_lock(&n->list_lock); 2339 } 2340 2341 do { 2342 2343 old.freelist = page->freelist; 2344 old.counters = page->counters; 2345 VM_BUG_ON(!old.frozen); 2346 2347 new.counters = old.counters; 2348 new.freelist = old.freelist; 2349 2350 new.frozen = 0; 2351 2352 } while (!__cmpxchg_double_slab(s, page, 2353 old.freelist, old.counters, 2354 new.freelist, new.counters, 2355 "unfreezing slab")); 2356 2357 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { 2358 page->next = discard_page; 2359 discard_page = page; 2360 } else { 2361 add_partial(n, page, DEACTIVATE_TO_TAIL); 2362 stat(s, FREE_ADD_PARTIAL); 2363 } 2364 } 2365 2366 if (n) 2367 spin_unlock(&n->list_lock); 2368 2369 while (discard_page) { 2370 page = discard_page; 2371 discard_page = discard_page->next; 2372 2373 stat(s, DEACTIVATE_EMPTY); 2374 discard_slab(s, page); 2375 stat(s, FREE_SLAB); 2376 } 2377 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2378 } 2379 2380 /* 2381 * Put a page that was just frozen (in __slab_free|get_partial_node) into a 2382 * partial page slot if available. 2383 * 2384 * If we did not find a slot then simply move all the partials to the 2385 * per node partial list. 2386 */ 2387 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) 2388 { 2389 #ifdef CONFIG_SLUB_CPU_PARTIAL 2390 struct page *oldpage; 2391 int pages; 2392 int pobjects; 2393 2394 preempt_disable(); 2395 do { 2396 pages = 0; 2397 pobjects = 0; 2398 oldpage = this_cpu_read(s->cpu_slab->partial); 2399 2400 if (oldpage) { 2401 pobjects = oldpage->pobjects; 2402 pages = oldpage->pages; 2403 if (drain && pobjects > slub_cpu_partial(s)) { 2404 unsigned long flags; 2405 /* 2406 * partial array is full. Move the existing 2407 * set to the per node partial list. 2408 */ 2409 local_irq_save(flags); 2410 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2411 local_irq_restore(flags); 2412 oldpage = NULL; 2413 pobjects = 0; 2414 pages = 0; 2415 stat(s, CPU_PARTIAL_DRAIN); 2416 } 2417 } 2418 2419 pages++; 2420 pobjects += page->objects - page->inuse; 2421 2422 page->pages = pages; 2423 page->pobjects = pobjects; 2424 page->next = oldpage; 2425 2426 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) 2427 != oldpage); 2428 if (unlikely(!slub_cpu_partial(s))) { 2429 unsigned long flags; 2430 2431 local_irq_save(flags); 2432 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2433 local_irq_restore(flags); 2434 } 2435 preempt_enable(); 2436 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2437 } 2438 2439 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 2440 { 2441 stat(s, CPUSLAB_FLUSH); 2442 deactivate_slab(s, c->page, c->freelist, c); 2443 2444 c->tid = next_tid(c->tid); 2445 } 2446 2447 /* 2448 * Flush cpu slab. 2449 * 2450 * Called from IPI handler with interrupts disabled. 
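 *
 * Deactivates the cpu slab and moves any per cpu partial slabs back to
 * the node lists. Besides the IPI path this is also called, with
 * interrupts disabled, from the cpu hotplug callback slub_cpu_dead().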
2451 */ 2452 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 2453 { 2454 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2455 2456 if (c->page) 2457 flush_slab(s, c); 2458 2459 unfreeze_partials(s, c); 2460 } 2461 2462 static void flush_cpu_slab(void *d) 2463 { 2464 struct kmem_cache *s = d; 2465 2466 __flush_cpu_slab(s, smp_processor_id()); 2467 } 2468 2469 static bool has_cpu_slab(int cpu, void *info) 2470 { 2471 struct kmem_cache *s = info; 2472 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2473 2474 return c->page || slub_percpu_partial(c); 2475 } 2476 2477 static void flush_all(struct kmem_cache *s) 2478 { 2479 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1); 2480 } 2481 2482 /* 2483 * Use the cpu notifier to insure that the cpu slabs are flushed when 2484 * necessary. 2485 */ 2486 static int slub_cpu_dead(unsigned int cpu) 2487 { 2488 struct kmem_cache *s; 2489 unsigned long flags; 2490 2491 mutex_lock(&slab_mutex); 2492 list_for_each_entry(s, &slab_caches, list) { 2493 local_irq_save(flags); 2494 __flush_cpu_slab(s, cpu); 2495 local_irq_restore(flags); 2496 } 2497 mutex_unlock(&slab_mutex); 2498 return 0; 2499 } 2500 2501 /* 2502 * Check if the objects in a per cpu structure fit numa 2503 * locality expectations. 2504 */ 2505 static inline int node_match(struct page *page, int node) 2506 { 2507 #ifdef CONFIG_NUMA 2508 if (node != NUMA_NO_NODE && page_to_nid(page) != node) 2509 return 0; 2510 #endif 2511 return 1; 2512 } 2513 2514 #ifdef CONFIG_SLUB_DEBUG 2515 static int count_free(struct page *page) 2516 { 2517 return page->objects - page->inuse; 2518 } 2519 2520 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 2521 { 2522 return atomic_long_read(&n->total_objects); 2523 } 2524 #endif /* CONFIG_SLUB_DEBUG */ 2525 2526 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS) 2527 static unsigned long count_partial(struct kmem_cache_node *n, 2528 int (*get_count)(struct page *)) 2529 { 2530 unsigned long flags; 2531 unsigned long x = 0; 2532 struct page *page; 2533 2534 spin_lock_irqsave(&n->list_lock, flags); 2535 list_for_each_entry(page, &n->partial, slab_list) 2536 x += get_count(page); 2537 spin_unlock_irqrestore(&n->list_lock, flags); 2538 return x; 2539 } 2540 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ 2541 2542 static noinline void 2543 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 2544 { 2545 #ifdef CONFIG_SLUB_DEBUG 2546 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 2547 DEFAULT_RATELIMIT_BURST); 2548 int node; 2549 struct kmem_cache_node *n; 2550 2551 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 2552 return; 2553 2554 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 2555 nid, gfpflags, &gfpflags); 2556 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 2557 s->name, s->object_size, s->size, oo_order(s->oo), 2558 oo_order(s->min)); 2559 2560 if (oo_order(s->min) > get_order(s->object_size)) 2561 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n", 2562 s->name); 2563 2564 for_each_kmem_cache_node(s, node, n) { 2565 unsigned long nr_slabs; 2566 unsigned long nr_objs; 2567 unsigned long nr_free; 2568 2569 nr_free = count_partial(n, count_free); 2570 nr_slabs = node_nr_slabs(n); 2571 nr_objs = node_nr_objs(n); 2572 2573 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 2574 node, nr_slabs, nr_objs, nr_free); 2575 } 2576 #endif 2577 } 2578 2579 static inline 
void *new_slab_objects(struct kmem_cache *s, gfp_t flags, 2580 int node, struct kmem_cache_cpu **pc) 2581 { 2582 void *freelist; 2583 struct kmem_cache_cpu *c = *pc; 2584 struct page *page; 2585 2586 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2587 2588 freelist = get_partial(s, flags, node, c); 2589 2590 if (freelist) 2591 return freelist; 2592 2593 page = new_slab(s, flags, node); 2594 if (page) { 2595 c = raw_cpu_ptr(s->cpu_slab); 2596 if (c->page) 2597 flush_slab(s, c); 2598 2599 /* 2600 * No other reference to the page yet so we can 2601 * muck around with it freely without cmpxchg 2602 */ 2603 freelist = page->freelist; 2604 page->freelist = NULL; 2605 2606 stat(s, ALLOC_SLAB); 2607 c->page = page; 2608 *pc = c; 2609 } 2610 2611 return freelist; 2612 } 2613 2614 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) 2615 { 2616 if (unlikely(PageSlabPfmemalloc(page))) 2617 return gfp_pfmemalloc_allowed(gfpflags); 2618 2619 return true; 2620 } 2621 2622 /* 2623 * Check the page->freelist of a page and either transfer the freelist to the 2624 * per cpu freelist or deactivate the page. 2625 * 2626 * The page is still frozen if the return value is not NULL. 2627 * 2628 * If this function returns NULL then the page has been unfrozen. 2629 * 2630 * This function must be called with interrupt disabled. 2631 */ 2632 static inline void *get_freelist(struct kmem_cache *s, struct page *page) 2633 { 2634 struct page new; 2635 unsigned long counters; 2636 void *freelist; 2637 2638 do { 2639 freelist = page->freelist; 2640 counters = page->counters; 2641 2642 new.counters = counters; 2643 VM_BUG_ON(!new.frozen); 2644 2645 new.inuse = page->objects; 2646 new.frozen = freelist != NULL; 2647 2648 } while (!__cmpxchg_double_slab(s, page, 2649 freelist, counters, 2650 NULL, new.counters, 2651 "get_freelist")); 2652 2653 return freelist; 2654 } 2655 2656 /* 2657 * Slow path. The lockless freelist is empty or we need to perform 2658 * debugging duties. 2659 * 2660 * Processing is still very fast if new objects have been freed to the 2661 * regular freelist. In that case we simply take over the regular freelist 2662 * as the lockless freelist and zap the regular freelist. 2663 * 2664 * If that is not working then we fall back to the partial lists. We take the 2665 * first element of the freelist as the object to allocate now and move the 2666 * rest of the freelist to the lockless freelist. 2667 * 2668 * And if we were unable to get a new slab from the partial slab lists then 2669 * we need to allocate a new slab. This is the slowest path since it involves 2670 * a call to the page allocator and the setup of a new slab. 2671 * 2672 * Version of __slab_alloc to use when we know that interrupts are 2673 * already disabled (which is the case for bulk allocation). 
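 *
 * The order of attempts is: the per cpu freelist (re-checked because an
 * interrupt may have refilled it), the page's own freelist via
 * get_freelist(), a slab from the per cpu partial list, a slab from the
 * node partial lists and finally a freshly allocated slab.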
2674 */ 2675 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2676 unsigned long addr, struct kmem_cache_cpu *c) 2677 { 2678 void *freelist; 2679 struct page *page; 2680 2681 stat(s, ALLOC_SLOWPATH); 2682 2683 page = c->page; 2684 if (!page) { 2685 /* 2686 * if the node is not online or has no normal memory, just 2687 * ignore the node constraint 2688 */ 2689 if (unlikely(node != NUMA_NO_NODE && 2690 !node_isset(node, slab_nodes))) 2691 node = NUMA_NO_NODE; 2692 goto new_slab; 2693 } 2694 redo: 2695 2696 if (unlikely(!node_match(page, node))) { 2697 /* 2698 * same as above but node_match() being false already 2699 * implies node != NUMA_NO_NODE 2700 */ 2701 if (!node_isset(node, slab_nodes)) { 2702 node = NUMA_NO_NODE; 2703 goto redo; 2704 } else { 2705 stat(s, ALLOC_NODE_MISMATCH); 2706 deactivate_slab(s, page, c->freelist, c); 2707 goto new_slab; 2708 } 2709 } 2710 2711 /* 2712 * By rights, we should be searching for a slab page that was 2713 * PFMEMALLOC but right now, we are losing the pfmemalloc 2714 * information when the page leaves the per-cpu allocator 2715 */ 2716 if (unlikely(!pfmemalloc_match(page, gfpflags))) { 2717 deactivate_slab(s, page, c->freelist, c); 2718 goto new_slab; 2719 } 2720 2721 /* must check again c->freelist in case of cpu migration or IRQ */ 2722 freelist = c->freelist; 2723 if (freelist) 2724 goto load_freelist; 2725 2726 freelist = get_freelist(s, page); 2727 2728 if (!freelist) { 2729 c->page = NULL; 2730 stat(s, DEACTIVATE_BYPASS); 2731 goto new_slab; 2732 } 2733 2734 stat(s, ALLOC_REFILL); 2735 2736 load_freelist: 2737 /* 2738 * freelist is pointing to the list of objects to be used. 2739 * page is pointing to the page from which the objects are obtained. 2740 * That page must be frozen for per cpu allocations to work. 2741 */ 2742 VM_BUG_ON(!c->page->frozen); 2743 c->freelist = get_freepointer(s, freelist); 2744 c->tid = next_tid(c->tid); 2745 return freelist; 2746 2747 new_slab: 2748 2749 if (slub_percpu_partial(c)) { 2750 page = c->page = slub_percpu_partial(c); 2751 slub_set_percpu_partial(c, page); 2752 stat(s, CPU_PARTIAL_ALLOC); 2753 goto redo; 2754 } 2755 2756 freelist = new_slab_objects(s, gfpflags, node, &c); 2757 2758 if (unlikely(!freelist)) { 2759 slab_out_of_memory(s, gfpflags, node); 2760 return NULL; 2761 } 2762 2763 page = c->page; 2764 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) 2765 goto load_freelist; 2766 2767 /* Only entered in the debug case */ 2768 if (kmem_cache_debug(s) && 2769 !alloc_debug_processing(s, page, freelist, addr)) 2770 goto new_slab; /* Slab failed checks. Next slab needed */ 2771 2772 deactivate_slab(s, page, get_freepointer(s, freelist), c); 2773 return freelist; 2774 } 2775 2776 /* 2777 * Another one that disabled interrupt and compensates for possible 2778 * cpu changes by refetching the per cpu area pointer. 2779 */ 2780 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2781 unsigned long addr, struct kmem_cache_cpu *c) 2782 { 2783 void *p; 2784 unsigned long flags; 2785 2786 local_irq_save(flags); 2787 #ifdef CONFIG_PREEMPTION 2788 /* 2789 * We may have been preempted and rescheduled on a different 2790 * cpu before disabling interrupts. Need to reload cpu area 2791 * pointer. 
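 *
 * The slow path updates c->page and c->freelist directly, so it must
 * operate on the kmem_cache_cpu of the cpu we are now pinned to with
 * interrupts disabled.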
2792 */ 2793 c = this_cpu_ptr(s->cpu_slab); 2794 #endif 2795 2796 p = ___slab_alloc(s, gfpflags, node, addr, c); 2797 local_irq_restore(flags); 2798 return p; 2799 } 2800 2801 /* 2802 * If the object has been wiped upon free, make sure it's fully initialized by 2803 * zeroing out freelist pointer. 2804 */ 2805 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 2806 void *obj) 2807 { 2808 if (unlikely(slab_want_init_on_free(s)) && obj) 2809 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 2810 0, sizeof(void *)); 2811 } 2812 2813 /* 2814 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 2815 * have the fastpath folded into their functions. So no function call 2816 * overhead for requests that can be satisfied on the fastpath. 2817 * 2818 * The fastpath works by first checking if the lockless freelist can be used. 2819 * If not then __slab_alloc is called for slow processing. 2820 * 2821 * Otherwise we can simply pick the next object from the lockless free list. 2822 */ 2823 static __always_inline void *slab_alloc_node(struct kmem_cache *s, 2824 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 2825 { 2826 void *object; 2827 struct kmem_cache_cpu *c; 2828 struct page *page; 2829 unsigned long tid; 2830 struct obj_cgroup *objcg = NULL; 2831 bool init = false; 2832 2833 s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); 2834 if (!s) 2835 return NULL; 2836 2837 object = kfence_alloc(s, orig_size, gfpflags); 2838 if (unlikely(object)) 2839 goto out; 2840 2841 redo: 2842 /* 2843 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 2844 * enabled. We may switch back and forth between cpus while 2845 * reading from one cpu area. That does not matter as long 2846 * as we end up on the original cpu again when doing the cmpxchg. 2847 * 2848 * We should guarantee that tid and kmem_cache are retrieved on 2849 * the same cpu. It could be different if CONFIG_PREEMPTION so we need 2850 * to check if it is matched or not. 2851 */ 2852 do { 2853 tid = this_cpu_read(s->cpu_slab->tid); 2854 c = raw_cpu_ptr(s->cpu_slab); 2855 } while (IS_ENABLED(CONFIG_PREEMPTION) && 2856 unlikely(tid != READ_ONCE(c->tid))); 2857 2858 /* 2859 * Irqless object alloc/free algorithm used here depends on sequence 2860 * of fetching cpu_slab's data. tid should be fetched before anything 2861 * on c to guarantee that object and page associated with previous tid 2862 * won't be used with current tid. If we fetch tid first, object and 2863 * page could be one associated with next tid and our alloc/free 2864 * request will be failed. In this case, we will retry. So, no problem. 2865 */ 2866 barrier(); 2867 2868 /* 2869 * The transaction ids are globally unique per cpu and per operation on 2870 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double 2871 * occurs on the right processor and that there was no operation on the 2872 * linked list in between. 2873 */ 2874 2875 object = c->freelist; 2876 page = c->page; 2877 if (unlikely(!object || !page || !node_match(page, node))) { 2878 object = __slab_alloc(s, gfpflags, node, addr, c); 2879 } else { 2880 void *next_object = get_freepointer_safe(s, object); 2881 2882 /* 2883 * The cmpxchg will only match if there was no additional 2884 * operation and if we are on the right processor. 2885 * 2886 * The cmpxchg does the following atomically (without lock 2887 * semantics!) 2888 * 1. Relocate first pointer to the current per cpu area. 2889 * 2. 
Verify that tid and freelist have not been changed 2890 * 3. If they were not changed replace tid and freelist 2891 * 2892 * Since this is without lock semantics the protection is only 2893 * against code executing on this cpu *not* from access by 2894 * other cpus. 2895 */ 2896 if (unlikely(!this_cpu_cmpxchg_double( 2897 s->cpu_slab->freelist, s->cpu_slab->tid, 2898 object, tid, 2899 next_object, next_tid(tid)))) { 2900 2901 note_cmpxchg_failure("slab_alloc", s, tid); 2902 goto redo; 2903 } 2904 prefetch_freepointer(s, next_object); 2905 stat(s, ALLOC_FASTPATH); 2906 } 2907 2908 maybe_wipe_obj_freeptr(s, object); 2909 init = slab_want_init_on_alloc(gfpflags, s); 2910 2911 out: 2912 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init); 2913 2914 return object; 2915 } 2916 2917 static __always_inline void *slab_alloc(struct kmem_cache *s, 2918 gfp_t gfpflags, unsigned long addr, size_t orig_size) 2919 { 2920 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size); 2921 } 2922 2923 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 2924 { 2925 void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size); 2926 2927 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, 2928 s->size, gfpflags); 2929 2930 return ret; 2931 } 2932 EXPORT_SYMBOL(kmem_cache_alloc); 2933 2934 #ifdef CONFIG_TRACING 2935 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 2936 { 2937 void *ret = slab_alloc(s, gfpflags, _RET_IP_, size); 2938 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); 2939 ret = kasan_kmalloc(s, ret, size, gfpflags); 2940 return ret; 2941 } 2942 EXPORT_SYMBOL(kmem_cache_alloc_trace); 2943 #endif 2944 2945 #ifdef CONFIG_NUMA 2946 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 2947 { 2948 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size); 2949 2950 trace_kmem_cache_alloc_node(_RET_IP_, ret, 2951 s->object_size, s->size, gfpflags, node); 2952 2953 return ret; 2954 } 2955 EXPORT_SYMBOL(kmem_cache_alloc_node); 2956 2957 #ifdef CONFIG_TRACING 2958 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 2959 gfp_t gfpflags, 2960 int node, size_t size) 2961 { 2962 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size); 2963 2964 trace_kmalloc_node(_RET_IP_, ret, 2965 size, s->size, gfpflags, node); 2966 2967 ret = kasan_kmalloc(s, ret, size, gfpflags); 2968 return ret; 2969 } 2970 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 2971 #endif 2972 #endif /* CONFIG_NUMA */ 2973 2974 /* 2975 * Slow path handling. This may still be called frequently since objects 2976 * have a longer lifetime than the cpu slabs in most processing loads. 2977 * 2978 * So we still attempt to reduce cache line usage. Just take the slab 2979 * lock and free the item. If there is no additional partial page 2980 * handling required then we can return immediately. 
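 *
 * The objects between head and tail are pushed onto page->freelist with a
 * cmpxchg_double. The node's list_lock is taken, speculatively, only when
 * the slab may have to change state: become empty, leave the full list or
 * enter the partial list.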
2981 */ 2982 static void __slab_free(struct kmem_cache *s, struct page *page, 2983 void *head, void *tail, int cnt, 2984 unsigned long addr) 2985 2986 { 2987 void *prior; 2988 int was_frozen; 2989 struct page new; 2990 unsigned long counters; 2991 struct kmem_cache_node *n = NULL; 2992 unsigned long flags; 2993 2994 stat(s, FREE_SLOWPATH); 2995 2996 if (kfence_free(head)) 2997 return; 2998 2999 if (kmem_cache_debug(s) && 3000 !free_debug_processing(s, page, head, tail, cnt, addr)) 3001 return; 3002 3003 do { 3004 if (unlikely(n)) { 3005 spin_unlock_irqrestore(&n->list_lock, flags); 3006 n = NULL; 3007 } 3008 prior = page->freelist; 3009 counters = page->counters; 3010 set_freepointer(s, tail, prior); 3011 new.counters = counters; 3012 was_frozen = new.frozen; 3013 new.inuse -= cnt; 3014 if ((!new.inuse || !prior) && !was_frozen) { 3015 3016 if (kmem_cache_has_cpu_partial(s) && !prior) { 3017 3018 /* 3019 * Slab was on no list before and will be 3020 * partially empty 3021 * We can defer the list move and instead 3022 * freeze it. 3023 */ 3024 new.frozen = 1; 3025 3026 } else { /* Needs to be taken off a list */ 3027 3028 n = get_node(s, page_to_nid(page)); 3029 /* 3030 * Speculatively acquire the list_lock. 3031 * If the cmpxchg does not succeed then we may 3032 * drop the list_lock without any processing. 3033 * 3034 * Otherwise the list_lock will synchronize with 3035 * other processors updating the list of slabs. 3036 */ 3037 spin_lock_irqsave(&n->list_lock, flags); 3038 3039 } 3040 } 3041 3042 } while (!cmpxchg_double_slab(s, page, 3043 prior, counters, 3044 head, new.counters, 3045 "__slab_free")); 3046 3047 if (likely(!n)) { 3048 3049 if (likely(was_frozen)) { 3050 /* 3051 * The list lock was not taken therefore no list 3052 * activity can be necessary. 3053 */ 3054 stat(s, FREE_FROZEN); 3055 } else if (new.frozen) { 3056 /* 3057 * If we just froze the page then put it onto the 3058 * per cpu partial list. 3059 */ 3060 put_cpu_partial(s, page, 1); 3061 stat(s, CPU_PARTIAL_FREE); 3062 } 3063 3064 return; 3065 } 3066 3067 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 3068 goto slab_empty; 3069 3070 /* 3071 * Objects left in the slab. If it was not on the partial list before 3072 * then add it. 3073 */ 3074 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 3075 remove_full(s, n, page); 3076 add_partial(n, page, DEACTIVATE_TO_TAIL); 3077 stat(s, FREE_ADD_PARTIAL); 3078 } 3079 spin_unlock_irqrestore(&n->list_lock, flags); 3080 return; 3081 3082 slab_empty: 3083 if (prior) { 3084 /* 3085 * Slab on the partial list. 3086 */ 3087 remove_partial(n, page); 3088 stat(s, FREE_REMOVE_PARTIAL); 3089 } else { 3090 /* Slab must be on the full list */ 3091 remove_full(s, n, page); 3092 } 3093 3094 spin_unlock_irqrestore(&n->list_lock, flags); 3095 stat(s, FREE_SLAB); 3096 discard_slab(s, page); 3097 } 3098 3099 /* 3100 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 3101 * can perform fastpath freeing without additional function calls. 3102 * 3103 * The fastpath is only possible if we are freeing to the current cpu slab 3104 * of this processor. This typically the case if we have just allocated 3105 * the item before. 3106 * 3107 * If fastpath is not possible then fall back to __slab_free where we deal 3108 * with all sorts of special processing. 3109 * 3110 * Bulk free of a freelist with several objects (all pointing to the 3111 * same page) possible by specifying head and tail ptr, plus objects 3112 * count (cnt). 
Bulk free indicated by tail pointer being set. 3113 */ 3114 static __always_inline void do_slab_free(struct kmem_cache *s, 3115 struct page *page, void *head, void *tail, 3116 int cnt, unsigned long addr) 3117 { 3118 void *tail_obj = tail ? : head; 3119 struct kmem_cache_cpu *c; 3120 unsigned long tid; 3121 3122 memcg_slab_free_hook(s, &head, 1); 3123 redo: 3124 /* 3125 * Determine the currently cpus per cpu slab. 3126 * The cpu may change afterward. However that does not matter since 3127 * data is retrieved via this pointer. If we are on the same cpu 3128 * during the cmpxchg then the free will succeed. 3129 */ 3130 do { 3131 tid = this_cpu_read(s->cpu_slab->tid); 3132 c = raw_cpu_ptr(s->cpu_slab); 3133 } while (IS_ENABLED(CONFIG_PREEMPTION) && 3134 unlikely(tid != READ_ONCE(c->tid))); 3135 3136 /* Same with comment on barrier() in slab_alloc_node() */ 3137 barrier(); 3138 3139 if (likely(page == c->page)) { 3140 void **freelist = READ_ONCE(c->freelist); 3141 3142 set_freepointer(s, tail_obj, freelist); 3143 3144 if (unlikely(!this_cpu_cmpxchg_double( 3145 s->cpu_slab->freelist, s->cpu_slab->tid, 3146 freelist, tid, 3147 head, next_tid(tid)))) { 3148 3149 note_cmpxchg_failure("slab_free", s, tid); 3150 goto redo; 3151 } 3152 stat(s, FREE_FASTPATH); 3153 } else 3154 __slab_free(s, page, head, tail_obj, cnt, addr); 3155 3156 } 3157 3158 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, 3159 void *head, void *tail, int cnt, 3160 unsigned long addr) 3161 { 3162 /* 3163 * With KASAN enabled slab_free_freelist_hook modifies the freelist 3164 * to remove objects, whose reuse must be delayed. 3165 */ 3166 if (slab_free_freelist_hook(s, &head, &tail)) 3167 do_slab_free(s, page, head, tail, cnt, addr); 3168 } 3169 3170 #ifdef CONFIG_KASAN_GENERIC 3171 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 3172 { 3173 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr); 3174 } 3175 #endif 3176 3177 void kmem_cache_free(struct kmem_cache *s, void *x) 3178 { 3179 s = cache_from_obj(s, x); 3180 if (!s) 3181 return; 3182 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); 3183 trace_kmem_cache_free(_RET_IP_, x, s->name); 3184 } 3185 EXPORT_SYMBOL(kmem_cache_free); 3186 3187 struct detached_freelist { 3188 struct page *page; 3189 void *tail; 3190 void *freelist; 3191 int cnt; 3192 struct kmem_cache *s; 3193 }; 3194 3195 /* 3196 * This function progressively scans the array with free objects (with 3197 * a limited look ahead) and extract objects belonging to the same 3198 * page. It builds a detached freelist directly within the given 3199 * page/objects. This can happen without any need for 3200 * synchronization, because the objects are owned by running process. 3201 * The freelist is build up as a single linked list in the objects. 3202 * The idea is, that this detached freelist can then be bulk 3203 * transferred to the real freelist(s), but only requiring a single 3204 * synchronization primitive. Look ahead in the array is limited due 3205 * to performance reasons. 3206 */ 3207 static inline 3208 int build_detached_freelist(struct kmem_cache *s, size_t size, 3209 void **p, struct detached_freelist *df) 3210 { 3211 size_t first_skipped_index = 0; 3212 int lookahead = 3; 3213 void *object; 3214 struct page *page; 3215 3216 /* Always re-init detached_freelist */ 3217 df->page = NULL; 3218 3219 do { 3220 object = p[--size]; 3221 /* Do we need !ZERO_OR_NULL_PTR(object) here? 
(for kfree) */ 3222 } while (!object && size); 3223 3224 if (!object) 3225 return 0; 3226 3227 page = virt_to_head_page(object); 3228 if (!s) { 3229 /* Handle kalloc'ed objects */ 3230 if (unlikely(!PageSlab(page))) { 3231 BUG_ON(!PageCompound(page)); 3232 kfree_hook(object); 3233 __free_pages(page, compound_order(page)); 3234 p[size] = NULL; /* mark object processed */ 3235 return size; 3236 } 3237 /* Derive kmem_cache from object */ 3238 df->s = page->slab_cache; 3239 } else { 3240 df->s = cache_from_obj(s, object); /* Support for memcg */ 3241 } 3242 3243 if (is_kfence_address(object)) { 3244 slab_free_hook(df->s, object, false); 3245 __kfence_free(object); 3246 p[size] = NULL; /* mark object processed */ 3247 return size; 3248 } 3249 3250 /* Start new detached freelist */ 3251 df->page = page; 3252 set_freepointer(df->s, object, NULL); 3253 df->tail = object; 3254 df->freelist = object; 3255 p[size] = NULL; /* mark object processed */ 3256 df->cnt = 1; 3257 3258 while (size) { 3259 object = p[--size]; 3260 if (!object) 3261 continue; /* Skip processed objects */ 3262 3263 /* df->page is always set at this point */ 3264 if (df->page == virt_to_head_page(object)) { 3265 /* Opportunity build freelist */ 3266 set_freepointer(df->s, object, df->freelist); 3267 df->freelist = object; 3268 df->cnt++; 3269 p[size] = NULL; /* mark object processed */ 3270 3271 continue; 3272 } 3273 3274 /* Limit look ahead search */ 3275 if (!--lookahead) 3276 break; 3277 3278 if (!first_skipped_index) 3279 first_skipped_index = size + 1; 3280 } 3281 3282 return first_skipped_index; 3283 } 3284 3285 /* Note that interrupts must be enabled when calling this function. */ 3286 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 3287 { 3288 if (WARN_ON(!size)) 3289 return; 3290 3291 memcg_slab_free_hook(s, p, size); 3292 do { 3293 struct detached_freelist df; 3294 3295 size = build_detached_freelist(s, size, p, &df); 3296 if (!df.page) 3297 continue; 3298 3299 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); 3300 } while (likely(size)); 3301 } 3302 EXPORT_SYMBOL(kmem_cache_free_bulk); 3303 3304 /* Note that interrupts must be enabled when calling this function. */ 3305 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3306 void **p) 3307 { 3308 struct kmem_cache_cpu *c; 3309 int i; 3310 struct obj_cgroup *objcg = NULL; 3311 3312 /* memcg and kmem_cache debug support */ 3313 s = slab_pre_alloc_hook(s, &objcg, size, flags); 3314 if (unlikely(!s)) 3315 return false; 3316 /* 3317 * Drain objects in the per cpu slab, while disabling local 3318 * IRQs, which protects against PREEMPT and interrupts 3319 * handlers invoking normal fastpath. 3320 */ 3321 local_irq_disable(); 3322 c = this_cpu_ptr(s->cpu_slab); 3323 3324 for (i = 0; i < size; i++) { 3325 void *object = kfence_alloc(s, s->object_size, flags); 3326 3327 if (unlikely(object)) { 3328 p[i] = object; 3329 continue; 3330 } 3331 3332 object = c->freelist; 3333 if (unlikely(!object)) { 3334 /* 3335 * We may have removed an object from c->freelist using 3336 * the fastpath in the previous iteration; in that case, 3337 * c->tid has not been bumped yet. 3338 * Since ___slab_alloc() may reenable interrupts while 3339 * allocating memory, we should bump c->tid now. 
3340 */ 3341 c->tid = next_tid(c->tid); 3342 3343 /* 3344 * Invoking slow path likely have side-effect 3345 * of re-populating per CPU c->freelist 3346 */ 3347 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 3348 _RET_IP_, c); 3349 if (unlikely(!p[i])) 3350 goto error; 3351 3352 c = this_cpu_ptr(s->cpu_slab); 3353 maybe_wipe_obj_freeptr(s, p[i]); 3354 3355 continue; /* goto for-loop */ 3356 } 3357 c->freelist = get_freepointer(s, object); 3358 p[i] = object; 3359 maybe_wipe_obj_freeptr(s, p[i]); 3360 } 3361 c->tid = next_tid(c->tid); 3362 local_irq_enable(); 3363 3364 /* 3365 * memcg and kmem_cache debug support and memory initialization. 3366 * Done outside of the IRQ disabled fastpath loop. 3367 */ 3368 slab_post_alloc_hook(s, objcg, flags, size, p, 3369 slab_want_init_on_alloc(flags, s)); 3370 return i; 3371 error: 3372 local_irq_enable(); 3373 slab_post_alloc_hook(s, objcg, flags, i, p, false); 3374 __kmem_cache_free_bulk(s, i, p); 3375 return 0; 3376 } 3377 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 3378 3379 3380 /* 3381 * Object placement in a slab is made very easy because we always start at 3382 * offset 0. If we tune the size of the object to the alignment then we can 3383 * get the required alignment by putting one properly sized object after 3384 * another. 3385 * 3386 * Notice that the allocation order determines the sizes of the per cpu 3387 * caches. Each processor has always one slab available for allocations. 3388 * Increasing the allocation order reduces the number of times that slabs 3389 * must be moved on and off the partial lists and is therefore a factor in 3390 * locking overhead. 3391 */ 3392 3393 /* 3394 * Minimum / Maximum order of slab pages. This influences locking overhead 3395 * and slab fragmentation. A higher order reduces the number of partial slabs 3396 * and increases the number of allocations possible without having to 3397 * take the list_lock. 3398 */ 3399 static unsigned int slub_min_order; 3400 static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER; 3401 static unsigned int slub_min_objects; 3402 3403 /* 3404 * Calculate the order of allocation given an slab object size. 3405 * 3406 * The order of allocation has significant impact on performance and other 3407 * system components. Generally order 0 allocations should be preferred since 3408 * order 0 does not cause fragmentation in the page allocator. Larger objects 3409 * be problematic to put into order 0 slabs because there may be too much 3410 * unused space left. We go to a higher order if more than 1/16th of the slab 3411 * would be wasted. 3412 * 3413 * In order to reach satisfactory performance we must ensure that a minimum 3414 * number of objects is in one slab. Otherwise we may generate too much 3415 * activity on the partial lists which requires taking the list_lock. This is 3416 * less a concern for large slabs though which are rarely used. 3417 * 3418 * slub_max_order specifies the order where we begin to stop considering the 3419 * number of objects in a slab as critical. If we reach slub_max_order then 3420 * we try to keep the page order as low as possible. So we accept more waste 3421 * of space in favor of a small page order. 3422 * 3423 * Higher order allocations also allow the placement of more objects in a 3424 * slab and thereby reduce object handling overhead. If the user has 3425 * requested a higher minimum order then we start with that one instead of 3426 * the smallest order which will fit the object. 
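 *
 * As a rough example (assuming 4K pages and the default slub_min_order of
 * 0): for a 700 byte object and a target of 8 objects per slab,
 * slab_order() starts at order 1 (8192 bytes). Such a slab holds 11
 * objects and leaves 492 bytes unused; since 492 <= 8192 / 16 the waste
 * is acceptable and order 1 is returned.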
3427 */ 3428 static inline unsigned int slab_order(unsigned int size, 3429 unsigned int min_objects, unsigned int max_order, 3430 unsigned int fract_leftover) 3431 { 3432 unsigned int min_order = slub_min_order; 3433 unsigned int order; 3434 3435 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 3436 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 3437 3438 for (order = max(min_order, (unsigned int)get_order(min_objects * size)); 3439 order <= max_order; order++) { 3440 3441 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 3442 unsigned int rem; 3443 3444 rem = slab_size % size; 3445 3446 if (rem <= slab_size / fract_leftover) 3447 break; 3448 } 3449 3450 return order; 3451 } 3452 3453 static inline int calculate_order(unsigned int size) 3454 { 3455 unsigned int order; 3456 unsigned int min_objects; 3457 unsigned int max_objects; 3458 unsigned int nr_cpus; 3459 3460 /* 3461 * Attempt to find best configuration for a slab. This 3462 * works by first attempting to generate a layout with 3463 * the best configuration and backing off gradually. 3464 * 3465 * First we increase the acceptable waste in a slab. Then 3466 * we reduce the minimum objects required in a slab. 3467 */ 3468 min_objects = slub_min_objects; 3469 if (!min_objects) { 3470 /* 3471 * Some architectures will only update present cpus when 3472 * onlining them, so don't trust the number if it's just 1. But 3473 * we also don't want to use nr_cpu_ids always, as on some other 3474 * architectures, there can be many possible cpus, but never 3475 * onlined. Here we compromise between trying to avoid too high 3476 * order on systems that appear larger than they are, and too 3477 * low order on systems that appear smaller than they are. 3478 */ 3479 nr_cpus = num_present_cpus(); 3480 if (nr_cpus <= 1) 3481 nr_cpus = nr_cpu_ids; 3482 min_objects = 4 * (fls(nr_cpus) + 1); 3483 } 3484 max_objects = order_objects(slub_max_order, size); 3485 min_objects = min(min_objects, max_objects); 3486 3487 while (min_objects > 1) { 3488 unsigned int fraction; 3489 3490 fraction = 16; 3491 while (fraction >= 4) { 3492 order = slab_order(size, min_objects, 3493 slub_max_order, fraction); 3494 if (order <= slub_max_order) 3495 return order; 3496 fraction /= 2; 3497 } 3498 min_objects--; 3499 } 3500 3501 /* 3502 * We were unable to place multiple objects in a slab. Now 3503 * lets see if we can place a single object there. 3504 */ 3505 order = slab_order(size, 1, slub_max_order, 1); 3506 if (order <= slub_max_order) 3507 return order; 3508 3509 /* 3510 * Doh this slab cannot be placed using slub_max_order. 3511 */ 3512 order = slab_order(size, 1, MAX_ORDER, 1); 3513 if (order < MAX_ORDER) 3514 return order; 3515 return -ENOSYS; 3516 } 3517 3518 static void 3519 init_kmem_cache_node(struct kmem_cache_node *n) 3520 { 3521 n->nr_partial = 0; 3522 spin_lock_init(&n->list_lock); 3523 INIT_LIST_HEAD(&n->partial); 3524 #ifdef CONFIG_SLUB_DEBUG 3525 atomic_long_set(&n->nr_slabs, 0); 3526 atomic_long_set(&n->total_objects, 0); 3527 INIT_LIST_HEAD(&n->full); 3528 #endif 3529 } 3530 3531 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 3532 { 3533 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 3534 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu)); 3535 3536 /* 3537 * Must align to double word boundary for the double cmpxchg 3538 * instructions to work; see __pcpu_double_call_return_bool(). 
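 *
 * The freelist and tid members of struct kmem_cache_cpu form the double
 * word that this_cpu_cmpxchg_double() updates in the allocation and free
 * fastpaths, hence the 2 * sizeof(void *) alignment below.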
3539 */ 3540 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 3541 2 * sizeof(void *)); 3542 3543 if (!s->cpu_slab) 3544 return 0; 3545 3546 init_kmem_cache_cpus(s); 3547 3548 return 1; 3549 } 3550 3551 static struct kmem_cache *kmem_cache_node; 3552 3553 /* 3554 * No kmalloc_node yet so do it by hand. We know that this is the first 3555 * slab on the node for this slabcache. There are no concurrent accesses 3556 * possible. 3557 * 3558 * Note that this function only works on the kmem_cache_node 3559 * when allocating for the kmem_cache_node. This is used for bootstrapping 3560 * memory on a fresh node that has no slab structures yet. 3561 */ 3562 static void early_kmem_cache_node_alloc(int node) 3563 { 3564 struct page *page; 3565 struct kmem_cache_node *n; 3566 3567 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 3568 3569 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 3570 3571 BUG_ON(!page); 3572 if (page_to_nid(page) != node) { 3573 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 3574 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 3575 } 3576 3577 n = page->freelist; 3578 BUG_ON(!n); 3579 #ifdef CONFIG_SLUB_DEBUG 3580 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 3581 init_tracking(kmem_cache_node, n); 3582 #endif 3583 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 3584 page->freelist = get_freepointer(kmem_cache_node, n); 3585 page->inuse = 1; 3586 page->frozen = 0; 3587 kmem_cache_node->node[node] = n; 3588 init_kmem_cache_node(n); 3589 inc_slabs_node(kmem_cache_node, node, page->objects); 3590 3591 /* 3592 * No locks need to be taken here as it has just been 3593 * initialized and there is no concurrent access. 3594 */ 3595 __add_partial(n, page, DEACTIVATE_TO_HEAD); 3596 } 3597 3598 static void free_kmem_cache_nodes(struct kmem_cache *s) 3599 { 3600 int node; 3601 struct kmem_cache_node *n; 3602 3603 for_each_kmem_cache_node(s, node, n) { 3604 s->node[node] = NULL; 3605 kmem_cache_free(kmem_cache_node, n); 3606 } 3607 } 3608 3609 void __kmem_cache_release(struct kmem_cache *s) 3610 { 3611 cache_random_seq_destroy(s); 3612 free_percpu(s->cpu_slab); 3613 free_kmem_cache_nodes(s); 3614 } 3615 3616 static int init_kmem_cache_nodes(struct kmem_cache *s) 3617 { 3618 int node; 3619 3620 for_each_node_mask(node, slab_nodes) { 3621 struct kmem_cache_node *n; 3622 3623 if (slab_state == DOWN) { 3624 early_kmem_cache_node_alloc(node); 3625 continue; 3626 } 3627 n = kmem_cache_alloc_node(kmem_cache_node, 3628 GFP_KERNEL, node); 3629 3630 if (!n) { 3631 free_kmem_cache_nodes(s); 3632 return 0; 3633 } 3634 3635 init_kmem_cache_node(n); 3636 s->node[node] = n; 3637 } 3638 return 1; 3639 } 3640 3641 static void set_min_partial(struct kmem_cache *s, unsigned long min) 3642 { 3643 if (min < MIN_PARTIAL) 3644 min = MIN_PARTIAL; 3645 else if (min > MAX_PARTIAL) 3646 min = MAX_PARTIAL; 3647 s->min_partial = min; 3648 } 3649 3650 static void set_cpu_partial(struct kmem_cache *s) 3651 { 3652 #ifdef CONFIG_SLUB_CPU_PARTIAL 3653 /* 3654 * cpu_partial determined the maximum number of objects kept in the 3655 * per cpu partial lists of a processor. 3656 * 3657 * Per cpu partial lists mainly contain slabs that just have one 3658 * object freed. If they are used for allocation then they can be 3659 * filled up again with minimal effort. The slab will never hit the 3660 * per node partial lists and therefore no locking will be required. 
3661 * 3662 * This setting also determines 3663 * 3664 * A) The number of objects from per cpu partial slabs dumped to the 3665 * per node list when we reach the limit. 3666 * B) The number of objects in cpu partial slabs to extract from the 3667 * per node list when we run out of per cpu objects. We only fetch 3668 * 50% to keep some capacity around for frees. 3669 */ 3670 if (!kmem_cache_has_cpu_partial(s)) 3671 slub_set_cpu_partial(s, 0); 3672 else if (s->size >= PAGE_SIZE) 3673 slub_set_cpu_partial(s, 2); 3674 else if (s->size >= 1024) 3675 slub_set_cpu_partial(s, 6); 3676 else if (s->size >= 256) 3677 slub_set_cpu_partial(s, 13); 3678 else 3679 slub_set_cpu_partial(s, 30); 3680 #endif 3681 } 3682 3683 /* 3684 * calculate_sizes() determines the order and the distribution of data within 3685 * a slab object. 3686 */ 3687 static int calculate_sizes(struct kmem_cache *s, int forced_order) 3688 { 3689 slab_flags_t flags = s->flags; 3690 unsigned int size = s->object_size; 3691 unsigned int freepointer_area; 3692 unsigned int order; 3693 3694 /* 3695 * Round up object size to the next word boundary. We can only 3696 * place the free pointer at word boundaries and this determines 3697 * the possible location of the free pointer. 3698 */ 3699 size = ALIGN(size, sizeof(void *)); 3700 /* 3701 * This is the area of the object where a freepointer can be 3702 * safely written. If redzoning adds more to the inuse size, we 3703 * can't use that portion for writing the freepointer, so 3704 * s->offset must be limited within this for the general case. 3705 */ 3706 freepointer_area = size; 3707 3708 #ifdef CONFIG_SLUB_DEBUG 3709 /* 3710 * Determine if we can poison the object itself. If the user of 3711 * the slab may touch the object after free or before allocation 3712 * then we should never poison the object itself. 3713 */ 3714 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 3715 !s->ctor) 3716 s->flags |= __OBJECT_POISON; 3717 else 3718 s->flags &= ~__OBJECT_POISON; 3719 3720 3721 /* 3722 * If we are Redzoning then check if there is some space between the 3723 * end of the object and the free pointer. If not then add an 3724 * additional word to have some bytes to store Redzone information. 3725 */ 3726 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 3727 size += sizeof(void *); 3728 #endif 3729 3730 /* 3731 * With that we have determined the number of bytes in actual use 3732 * by the object. This is the potential offset to the free pointer. 3733 */ 3734 s->inuse = size; 3735 3736 if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 3737 s->ctor)) { 3738 /* 3739 * Relocate free pointer after the object if it is not 3740 * permitted to overwrite the first word of the object on 3741 * kmem_cache_free. 3742 * 3743 * This is the case if we do RCU, have a constructor or 3744 * destructor or are poisoning the objects. 3745 * 3746 * The assumption that s->offset >= s->inuse means free 3747 * pointer is outside of the object is used in the 3748 * freeptr_outside_object() function. If that is no 3749 * longer true, the function needs to be modified. 3750 */ 3751 s->offset = size; 3752 size += sizeof(void *); 3753 } else if (freepointer_area > sizeof(void *)) { 3754 /* 3755 * Store freelist pointer near middle of object to keep 3756 * it away from the edges of the object to avoid small 3757 * sized over/underflows from neighboring allocations. 
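 *
 * For example, a cache of 192 byte objects with no constructor and none
 * of the flags handled above places its free pointer at offset 96.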
3758 */ 3759 s->offset = ALIGN(freepointer_area / 2, sizeof(void *)); 3760 } 3761 3762 #ifdef CONFIG_SLUB_DEBUG 3763 if (flags & SLAB_STORE_USER) 3764 /* 3765 * Need to store information about allocs and frees after 3766 * the object. 3767 */ 3768 size += 2 * sizeof(struct track); 3769 #endif 3770 3771 kasan_cache_create(s, &size, &s->flags); 3772 #ifdef CONFIG_SLUB_DEBUG 3773 if (flags & SLAB_RED_ZONE) { 3774 /* 3775 * Add some empty padding so that we can catch 3776 * overwrites from earlier objects rather than let 3777 * tracking information or the free pointer be 3778 * corrupted if a user writes before the start 3779 * of the object. 3780 */ 3781 size += sizeof(void *); 3782 3783 s->red_left_pad = sizeof(void *); 3784 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 3785 size += s->red_left_pad; 3786 } 3787 #endif 3788 3789 /* 3790 * SLUB stores one object immediately after another beginning from 3791 * offset 0. In order to align the objects we have to simply size 3792 * each object to conform to the alignment. 3793 */ 3794 size = ALIGN(size, s->align); 3795 s->size = size; 3796 s->reciprocal_size = reciprocal_value(size); 3797 if (forced_order >= 0) 3798 order = forced_order; 3799 else 3800 order = calculate_order(size); 3801 3802 if ((int)order < 0) 3803 return 0; 3804 3805 s->allocflags = 0; 3806 if (order) 3807 s->allocflags |= __GFP_COMP; 3808 3809 if (s->flags & SLAB_CACHE_DMA) 3810 s->allocflags |= GFP_DMA; 3811 3812 if (s->flags & SLAB_CACHE_DMA32) 3813 s->allocflags |= GFP_DMA32; 3814 3815 if (s->flags & SLAB_RECLAIM_ACCOUNT) 3816 s->allocflags |= __GFP_RECLAIMABLE; 3817 3818 /* 3819 * Determine the number of objects per slab 3820 */ 3821 s->oo = oo_make(order, size); 3822 s->min = oo_make(get_order(size), size); 3823 if (oo_objects(s->oo) > oo_objects(s->max)) 3824 s->max = s->oo; 3825 3826 return !!oo_objects(s->oo); 3827 } 3828 3829 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 3830 { 3831 #ifdef CONFIG_SLUB_DEBUG 3832 /* 3833 * If no slub_debug was enabled globally, the static key is not yet 3834 * enabled by setup_slub_debug(). Enable it if the cache is being 3835 * created with any of the debugging flags passed explicitly. 3836 */ 3837 if (flags & SLAB_DEBUG_FLAGS) 3838 static_branch_enable(&slub_debug_enabled); 3839 #endif 3840 s->flags = kmem_cache_flags(s->size, flags, s->name); 3841 #ifdef CONFIG_SLAB_FREELIST_HARDENED 3842 s->random = get_random_long(); 3843 #endif 3844 3845 if (!calculate_sizes(s, -1)) 3846 goto error; 3847 if (disable_higher_order_debug) { 3848 /* 3849 * Disable debugging flags that store metadata if the min slab 3850 * order increased. 3851 */ 3852 if (get_order(s->size) > get_order(s->object_size)) { 3853 s->flags &= ~DEBUG_METADATA_FLAGS; 3854 s->offset = 0; 3855 if (!calculate_sizes(s, -1)) 3856 goto error; 3857 } 3858 } 3859 3860 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 3861 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 3862 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) 3863 /* Enable fast mode */ 3864 s->flags |= __CMPXCHG_DOUBLE; 3865 #endif 3866 3867 /* 3868 * The larger the object size is, the more pages we want on the partial 3869 * list to avoid pounding the page allocator excessively. 
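 *
 * The value is ilog2(size) / 2, which set_min_partial() clamps to the
 * [MIN_PARTIAL, MAX_PARTIAL] range: e.g. 6 for a 4096 byte cache, while a
 * 64 byte cache is raised to the minimum of 5.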
3870 */ 3871 set_min_partial(s, ilog2(s->size) / 2); 3872 3873 set_cpu_partial(s); 3874 3875 #ifdef CONFIG_NUMA 3876 s->remote_node_defrag_ratio = 1000; 3877 #endif 3878 3879 /* Initialize the pre-computed randomized freelist if slab is up */ 3880 if (slab_state >= UP) { 3881 if (init_cache_random_seq(s)) 3882 goto error; 3883 } 3884 3885 if (!init_kmem_cache_nodes(s)) 3886 goto error; 3887 3888 if (alloc_kmem_cache_cpus(s)) 3889 return 0; 3890 3891 free_kmem_cache_nodes(s); 3892 error: 3893 return -EINVAL; 3894 } 3895 3896 static void list_slab_objects(struct kmem_cache *s, struct page *page, 3897 const char *text) 3898 { 3899 #ifdef CONFIG_SLUB_DEBUG 3900 void *addr = page_address(page); 3901 unsigned long *map; 3902 void *p; 3903 3904 slab_err(s, page, text, s->name); 3905 slab_lock(page); 3906 3907 map = get_map(s, page); 3908 for_each_object(p, s, addr, page->objects) { 3909 3910 if (!test_bit(__obj_to_index(s, addr, p), map)) { 3911 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 3912 print_tracking(s, p); 3913 } 3914 } 3915 put_map(map); 3916 slab_unlock(page); 3917 #endif 3918 } 3919 3920 /* 3921 * Attempt to free all partial slabs on a node. 3922 * This is called from __kmem_cache_shutdown(). We must take list_lock 3923 * because sysfs file might still access partial list after the shutdowning. 3924 */ 3925 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 3926 { 3927 LIST_HEAD(discard); 3928 struct page *page, *h; 3929 3930 BUG_ON(irqs_disabled()); 3931 spin_lock_irq(&n->list_lock); 3932 list_for_each_entry_safe(page, h, &n->partial, slab_list) { 3933 if (!page->inuse) { 3934 remove_partial(n, page); 3935 list_add(&page->slab_list, &discard); 3936 } else { 3937 list_slab_objects(s, page, 3938 "Objects remaining in %s on __kmem_cache_shutdown()"); 3939 } 3940 } 3941 spin_unlock_irq(&n->list_lock); 3942 3943 list_for_each_entry_safe(page, h, &discard, slab_list) 3944 discard_slab(s, page); 3945 } 3946 3947 bool __kmem_cache_empty(struct kmem_cache *s) 3948 { 3949 int node; 3950 struct kmem_cache_node *n; 3951 3952 for_each_kmem_cache_node(s, node, n) 3953 if (n->nr_partial || slabs_node(s, node)) 3954 return false; 3955 return true; 3956 } 3957 3958 /* 3959 * Release all resources used by a slab cache. 
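 *
 * Flushes the per cpu slabs and then frees every empty partial slab.
 * Returns nonzero if objects are still in use, in which case the cache
 * cannot be torn down.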
3960 */ 3961 int __kmem_cache_shutdown(struct kmem_cache *s) 3962 { 3963 int node; 3964 struct kmem_cache_node *n; 3965 3966 flush_all(s); 3967 /* Attempt to free all objects */ 3968 for_each_kmem_cache_node(s, node, n) { 3969 free_partial(s, n); 3970 if (n->nr_partial || slabs_node(s, node)) 3971 return 1; 3972 } 3973 return 0; 3974 } 3975 3976 #ifdef CONFIG_PRINTK 3977 void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page) 3978 { 3979 void *base; 3980 int __maybe_unused i; 3981 unsigned int objnr; 3982 void *objp; 3983 void *objp0; 3984 struct kmem_cache *s = page->slab_cache; 3985 struct track __maybe_unused *trackp; 3986 3987 kpp->kp_ptr = object; 3988 kpp->kp_page = page; 3989 kpp->kp_slab_cache = s; 3990 base = page_address(page); 3991 objp0 = kasan_reset_tag(object); 3992 #ifdef CONFIG_SLUB_DEBUG 3993 objp = restore_red_left(s, objp0); 3994 #else 3995 objp = objp0; 3996 #endif 3997 objnr = obj_to_index(s, page, objp); 3998 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 3999 objp = base + s->size * objnr; 4000 kpp->kp_objp = objp; 4001 if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) || 4002 !(s->flags & SLAB_STORE_USER)) 4003 return; 4004 #ifdef CONFIG_SLUB_DEBUG 4005 trackp = get_track(s, objp, TRACK_ALLOC); 4006 kpp->kp_ret = (void *)trackp->addr; 4007 #ifdef CONFIG_STACKTRACE 4008 for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) { 4009 kpp->kp_stack[i] = (void *)trackp->addrs[i]; 4010 if (!kpp->kp_stack[i]) 4011 break; 4012 } 4013 #endif 4014 #endif 4015 } 4016 #endif 4017 4018 /******************************************************************** 4019 * Kmalloc subsystem 4020 *******************************************************************/ 4021 4022 static int __init setup_slub_min_order(char *str) 4023 { 4024 get_option(&str, (int *)&slub_min_order); 4025 4026 return 1; 4027 } 4028 4029 __setup("slub_min_order=", setup_slub_min_order); 4030 4031 static int __init setup_slub_max_order(char *str) 4032 { 4033 get_option(&str, (int *)&slub_max_order); 4034 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1); 4035 4036 return 1; 4037 } 4038 4039 __setup("slub_max_order=", setup_slub_max_order); 4040 4041 static int __init setup_slub_min_objects(char *str) 4042 { 4043 get_option(&str, (int *)&slub_min_objects); 4044 4045 return 1; 4046 } 4047 4048 __setup("slub_min_objects=", setup_slub_min_objects); 4049 4050 void *__kmalloc(size_t size, gfp_t flags) 4051 { 4052 struct kmem_cache *s; 4053 void *ret; 4054 4055 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4056 return kmalloc_large(size, flags); 4057 4058 s = kmalloc_slab(size, flags); 4059 4060 if (unlikely(ZERO_OR_NULL_PTR(s))) 4061 return s; 4062 4063 ret = slab_alloc(s, flags, _RET_IP_, size); 4064 4065 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 4066 4067 ret = kasan_kmalloc(s, ret, size, flags); 4068 4069 return ret; 4070 } 4071 EXPORT_SYMBOL(__kmalloc); 4072 4073 #ifdef CONFIG_NUMA 4074 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 4075 { 4076 struct page *page; 4077 void *ptr = NULL; 4078 unsigned int order = get_order(size); 4079 4080 flags |= __GFP_COMP; 4081 page = alloc_pages_node(node, flags, order); 4082 if (page) { 4083 ptr = page_address(page); 4084 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, 4085 PAGE_SIZE << order); 4086 } 4087 4088 return kmalloc_large_node_hook(ptr, size, flags); 4089 } 4090 4091 void *__kmalloc_node(size_t size, gfp_t flags, int node) 
4092 { 4093 struct kmem_cache *s; 4094 void *ret; 4095 4096 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4097 ret = kmalloc_large_node(size, flags, node); 4098 4099 trace_kmalloc_node(_RET_IP_, ret, 4100 size, PAGE_SIZE << get_order(size), 4101 flags, node); 4102 4103 return ret; 4104 } 4105 4106 s = kmalloc_slab(size, flags); 4107 4108 if (unlikely(ZERO_OR_NULL_PTR(s))) 4109 return s; 4110 4111 ret = slab_alloc_node(s, flags, node, _RET_IP_, size); 4112 4113 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); 4114 4115 ret = kasan_kmalloc(s, ret, size, flags); 4116 4117 return ret; 4118 } 4119 EXPORT_SYMBOL(__kmalloc_node); 4120 #endif /* CONFIG_NUMA */ 4121 4122 #ifdef CONFIG_HARDENED_USERCOPY 4123 /* 4124 * Rejects incorrectly sized objects and objects that are to be copied 4125 * to/from userspace but do not fall entirely within the containing slab 4126 * cache's usercopy region. 4127 * 4128 * Returns NULL if check passes, otherwise const char * to name of cache 4129 * to indicate an error. 4130 */ 4131 void __check_heap_object(const void *ptr, unsigned long n, struct page *page, 4132 bool to_user) 4133 { 4134 struct kmem_cache *s; 4135 unsigned int offset; 4136 size_t object_size; 4137 bool is_kfence = is_kfence_address(ptr); 4138 4139 ptr = kasan_reset_tag(ptr); 4140 4141 /* Find object and usable object size. */ 4142 s = page->slab_cache; 4143 4144 /* Reject impossible pointers. */ 4145 if (ptr < page_address(page)) 4146 usercopy_abort("SLUB object not in SLUB page?!", NULL, 4147 to_user, 0, n); 4148 4149 /* Find offset within object. */ 4150 if (is_kfence) 4151 offset = ptr - kfence_object_start(ptr); 4152 else 4153 offset = (ptr - page_address(page)) % s->size; 4154 4155 /* Adjust for redzone and reject if within the redzone. */ 4156 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 4157 if (offset < s->red_left_pad) 4158 usercopy_abort("SLUB object in left red zone", 4159 s->name, to_user, offset, n); 4160 offset -= s->red_left_pad; 4161 } 4162 4163 /* Allow address range falling entirely within usercopy region. */ 4164 if (offset >= s->useroffset && 4165 offset - s->useroffset <= s->usersize && 4166 n <= s->useroffset - offset + s->usersize) 4167 return; 4168 4169 /* 4170 * If the copy is still within the allocated object, produce 4171 * a warning instead of rejecting the copy. This is intended 4172 * to be a temporary method to find any missing usercopy 4173 * whitelists. 
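 *
 * A sketch of how a cache whitelists a field so that the region check
 * above succeeds instead of reaching this fallback (struct and names
 * are made up):
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char payload[128];   <- only this range may cross to userspace
 *	};
 *
 *	cache = kmem_cache_create_usercopy("foo", sizeof(struct foo), 0,
 *			SLAB_HWCACHE_ALIGN,
 *			offsetof(struct foo, payload),
 *			sizeof_field(struct foo, payload), NULL);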
4174 */ 4175 object_size = slab_ksize(s); 4176 if (usercopy_fallback && 4177 offset <= object_size && n <= object_size - offset) { 4178 usercopy_warn("SLUB object", s->name, to_user, offset, n); 4179 return; 4180 } 4181 4182 usercopy_abort("SLUB object", s->name, to_user, offset, n); 4183 } 4184 #endif /* CONFIG_HARDENED_USERCOPY */ 4185 4186 size_t __ksize(const void *object) 4187 { 4188 struct page *page; 4189 4190 if (unlikely(object == ZERO_SIZE_PTR)) 4191 return 0; 4192 4193 page = virt_to_head_page(object); 4194 4195 if (unlikely(!PageSlab(page))) { 4196 WARN_ON(!PageCompound(page)); 4197 return page_size(page); 4198 } 4199 4200 return slab_ksize(page->slab_cache); 4201 } 4202 EXPORT_SYMBOL(__ksize); 4203 4204 void kfree(const void *x) 4205 { 4206 struct page *page; 4207 void *object = (void *)x; 4208 4209 trace_kfree(_RET_IP_, x); 4210 4211 if (unlikely(ZERO_OR_NULL_PTR(x))) 4212 return; 4213 4214 page = virt_to_head_page(x); 4215 if (unlikely(!PageSlab(page))) { 4216 unsigned int order = compound_order(page); 4217 4218 BUG_ON(!PageCompound(page)); 4219 kfree_hook(object); 4220 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, 4221 -(PAGE_SIZE << order)); 4222 __free_pages(page, order); 4223 return; 4224 } 4225 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); 4226 } 4227 EXPORT_SYMBOL(kfree); 4228 4229 #define SHRINK_PROMOTE_MAX 32 4230 4231 /* 4232 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 4233 * up most to the head of the partial lists. New allocations will then 4234 * fill those up and thus they can be removed from the partial lists. 4235 * 4236 * The slabs with the least items are placed last. This results in them 4237 * being allocated from last increasing the chance that the last objects 4238 * are freed in them. 4239 */ 4240 int __kmem_cache_shrink(struct kmem_cache *s) 4241 { 4242 int node; 4243 int i; 4244 struct kmem_cache_node *n; 4245 struct page *page; 4246 struct page *t; 4247 struct list_head discard; 4248 struct list_head promote[SHRINK_PROMOTE_MAX]; 4249 unsigned long flags; 4250 int ret = 0; 4251 4252 flush_all(s); 4253 for_each_kmem_cache_node(s, node, n) { 4254 INIT_LIST_HEAD(&discard); 4255 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 4256 INIT_LIST_HEAD(promote + i); 4257 4258 spin_lock_irqsave(&n->list_lock, flags); 4259 4260 /* 4261 * Build lists of slabs to discard or promote. 4262 * 4263 * Note that concurrent frees may occur while we hold the 4264 * list_lock. page->inuse here is the upper limit. 4265 */ 4266 list_for_each_entry_safe(page, t, &n->partial, slab_list) { 4267 int free = page->objects - page->inuse; 4268 4269 /* Do not reread page->inuse */ 4270 barrier(); 4271 4272 /* We do not keep full slabs on the list */ 4273 BUG_ON(free <= 0); 4274 4275 if (free == page->objects) { 4276 list_move(&page->slab_list, &discard); 4277 n->nr_partial--; 4278 } else if (free <= SHRINK_PROMOTE_MAX) 4279 list_move(&page->slab_list, promote + free - 1); 4280 } 4281 4282 /* 4283 * Promote the slabs filled up most to the head of the 4284 * partial list. 
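 *
 * Worked example: three partial slabs with 1, 3 and 2 free objects were
 * bucketed into promote[0], promote[2] and promote[1] above.  Splicing
 * from index SHRINK_PROMOTE_MAX - 1 down to 0 below puts each bucket in
 * front of the previous ones, so the slab with a single free object
 * (the fullest one) ends up at the very head of the partial list.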
4285 */ 4286 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 4287 list_splice(promote + i, &n->partial); 4288 4289 spin_unlock_irqrestore(&n->list_lock, flags); 4290 4291 /* Release empty slabs */ 4292 list_for_each_entry_safe(page, t, &discard, slab_list) 4293 discard_slab(s, page); 4294 4295 if (slabs_node(s, node)) 4296 ret = 1; 4297 } 4298 4299 return ret; 4300 } 4301 4302 static int slab_mem_going_offline_callback(void *arg) 4303 { 4304 struct kmem_cache *s; 4305 4306 mutex_lock(&slab_mutex); 4307 list_for_each_entry(s, &slab_caches, list) 4308 __kmem_cache_shrink(s); 4309 mutex_unlock(&slab_mutex); 4310 4311 return 0; 4312 } 4313 4314 static void slab_mem_offline_callback(void *arg) 4315 { 4316 struct memory_notify *marg = arg; 4317 int offline_node; 4318 4319 offline_node = marg->status_change_nid_normal; 4320 4321 /* 4322 * If the node still has available memory, we still need its 4323 * kmem_cache_node, so there is nothing to do here. 4324 */ 4325 if (offline_node < 0) 4326 return; 4327 4328 mutex_lock(&slab_mutex); 4329 node_clear(offline_node, slab_nodes); 4330 /* 4331 * We no longer free kmem_cache_node structures here, as it would be 4332 * racy with all get_node() users, and infeasible to protect them with 4333 * slab_mutex. 4334 */ 4335 mutex_unlock(&slab_mutex); 4336 } 4337 4338 static int slab_mem_going_online_callback(void *arg) 4339 { 4340 struct kmem_cache_node *n; 4341 struct kmem_cache *s; 4342 struct memory_notify *marg = arg; 4343 int nid = marg->status_change_nid_normal; 4344 int ret = 0; 4345 4346 /* 4347 * If the node's memory is already available, then kmem_cache_node is 4348 * already created. Nothing to do. 4349 */ 4350 if (nid < 0) 4351 return 0; 4352 4353 /* 4354 * We are bringing a node online. No memory is available yet. We must 4355 * allocate a kmem_cache_node structure in order to bring the node 4356 * online. 4357 */ 4358 mutex_lock(&slab_mutex); 4359 list_for_each_entry(s, &slab_caches, list) { 4360 /* 4361 * The structure may already exist if the node was previously 4362 * onlined and offlined. 4363 */ 4364 if (get_node(s, nid)) 4365 continue; 4366 /* 4367 * XXX: kmem_cache_alloc_node will fall back to other nodes 4368 * since memory is not yet available from the node that 4369 * is brought up. 4370 */ 4371 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 4372 if (!n) { 4373 ret = -ENOMEM; 4374 goto out; 4375 } 4376 init_kmem_cache_node(n); 4377 s->node[nid] = n; 4378 } 4379 /* 4380 * Any cache created after this point will also have kmem_cache_node 4381 * initialized for the new node.
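 *
 * Example sequence (illustrative): once node 1 is hot-added, the loop
 * above has given every existing cache a kmem_cache_node for node 1,
 * and setting bit 1 in slab_nodes below makes init_kmem_cache_nodes()
 * do the same for caches created from now on.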
4382 */ 4383 node_set(nid, slab_nodes); 4384 out: 4385 mutex_unlock(&slab_mutex); 4386 return ret; 4387 } 4388 4389 static int slab_memory_callback(struct notifier_block *self, 4390 unsigned long action, void *arg) 4391 { 4392 int ret = 0; 4393 4394 switch (action) { 4395 case MEM_GOING_ONLINE: 4396 ret = slab_mem_going_online_callback(arg); 4397 break; 4398 case MEM_GOING_OFFLINE: 4399 ret = slab_mem_going_offline_callback(arg); 4400 break; 4401 case MEM_OFFLINE: 4402 case MEM_CANCEL_ONLINE: 4403 slab_mem_offline_callback(arg); 4404 break; 4405 case MEM_ONLINE: 4406 case MEM_CANCEL_OFFLINE: 4407 break; 4408 } 4409 if (ret) 4410 ret = notifier_from_errno(ret); 4411 else 4412 ret = NOTIFY_OK; 4413 return ret; 4414 } 4415 4416 static struct notifier_block slab_memory_callback_nb = { 4417 .notifier_call = slab_memory_callback, 4418 .priority = SLAB_CALLBACK_PRI, 4419 }; 4420 4421 /******************************************************************** 4422 * Basic setup of slabs 4423 *******************************************************************/ 4424 4425 /* 4426 * Used for early kmem_cache structures that were allocated using 4427 * the page allocator. Allocate them properly, then fix up the pointers 4428 * that may be pointing to the wrong kmem_cache structure. 4429 */ 4430 4431 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 4432 { 4433 int node; 4434 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 4435 struct kmem_cache_node *n; 4436 4437 memcpy(s, static_cache, kmem_cache->object_size); 4438 4439 /* 4440 * This runs very early, and only the boot processor is supposed to be 4441 * up. Even if it weren't true, IRQs are not up so we couldn't fire 4442 * IPIs around. 4443 */ 4444 __flush_cpu_slab(s, smp_processor_id()); 4445 for_each_kmem_cache_node(s, node, n) { 4446 struct page *p; 4447 4448 list_for_each_entry(p, &n->partial, slab_list) 4449 p->slab_cache = s; 4450 4451 #ifdef CONFIG_SLUB_DEBUG 4452 list_for_each_entry(p, &n->full, slab_list) 4453 p->slab_cache = s; 4454 #endif 4455 } 4456 list_add(&s->list, &slab_caches); 4457 return s; 4458 } 4459 4460 void __init kmem_cache_init(void) 4461 { 4462 static __initdata struct kmem_cache boot_kmem_cache, 4463 boot_kmem_cache_node; 4464 int node; 4465 4466 if (debug_guardpage_minorder()) 4467 slub_max_order = 0; 4468 4469 kmem_cache_node = &boot_kmem_cache_node; 4470 kmem_cache = &boot_kmem_cache; 4471 4472 /* 4473 * Initialize the nodemask for which we will allocate per node 4474 * structures. We don't need to take slab_mutex here yet.
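 *
 * For example, on a two-node machine where only node 0 has normal
 * memory at boot, only bit 0 gets set here; node 1 is added to
 * slab_nodes later by slab_mem_going_online_callback() when its
 * memory comes online.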
4475 */ 4476 for_each_node_state(node, N_NORMAL_MEMORY) 4477 node_set(node, slab_nodes); 4478 4479 create_boot_cache(kmem_cache_node, "kmem_cache_node", 4480 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); 4481 4482 register_hotmemory_notifier(&slab_memory_callback_nb); 4483 4484 /* Able to allocate the per node structures */ 4485 slab_state = PARTIAL; 4486 4487 create_boot_cache(kmem_cache, "kmem_cache", 4488 offsetof(struct kmem_cache, node) + 4489 nr_node_ids * sizeof(struct kmem_cache_node *), 4490 SLAB_HWCACHE_ALIGN, 0, 0); 4491 4492 kmem_cache = bootstrap(&boot_kmem_cache); 4493 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 4494 4495 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 4496 setup_kmalloc_cache_index_table(); 4497 create_kmalloc_caches(0); 4498 4499 /* Setup random freelists for each cache */ 4500 init_freelist_randomization(); 4501 4502 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 4503 slub_cpu_dead); 4504 4505 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 4506 cache_line_size(), 4507 slub_min_order, slub_max_order, slub_min_objects, 4508 nr_cpu_ids, nr_node_ids); 4509 } 4510 4511 void __init kmem_cache_init_late(void) 4512 { 4513 } 4514 4515 struct kmem_cache * 4516 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 4517 slab_flags_t flags, void (*ctor)(void *)) 4518 { 4519 struct kmem_cache *s; 4520 4521 s = find_mergeable(size, align, flags, name, ctor); 4522 if (s) { 4523 s->refcount++; 4524 4525 /* 4526 * Adjust the object sizes so that we clear 4527 * the complete object on kzalloc. 4528 */ 4529 s->object_size = max(s->object_size, size); 4530 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 4531 4532 if (sysfs_slab_alias(s, name)) { 4533 s->refcount--; 4534 s = NULL; 4535 } 4536 } 4537 4538 return s; 4539 } 4540 4541 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 4542 { 4543 int err; 4544 4545 err = kmem_cache_open(s, flags); 4546 if (err) 4547 return err; 4548 4549 /* Mutex is not taken during early boot */ 4550 if (slab_state <= UP) 4551 return 0; 4552 4553 err = sysfs_slab_add(s); 4554 if (err) 4555 __kmem_cache_release(s); 4556 4557 return err; 4558 } 4559 4560 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 4561 { 4562 struct kmem_cache *s; 4563 void *ret; 4564 4565 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4566 return kmalloc_large(size, gfpflags); 4567 4568 s = kmalloc_slab(size, gfpflags); 4569 4570 if (unlikely(ZERO_OR_NULL_PTR(s))) 4571 return s; 4572 4573 ret = slab_alloc(s, gfpflags, caller, size); 4574 4575 /* Honor the call site pointer we received. */ 4576 trace_kmalloc(caller, ret, size, s->size, gfpflags); 4577 4578 return ret; 4579 } 4580 EXPORT_SYMBOL(__kmalloc_track_caller); 4581 4582 #ifdef CONFIG_NUMA 4583 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 4584 int node, unsigned long caller) 4585 { 4586 struct kmem_cache *s; 4587 void *ret; 4588 4589 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4590 ret = kmalloc_large_node(size, gfpflags, node); 4591 4592 trace_kmalloc_node(caller, ret, 4593 size, PAGE_SIZE << get_order(size), 4594 gfpflags, node); 4595 4596 return ret; 4597 } 4598 4599 s = kmalloc_slab(size, gfpflags); 4600 4601 if (unlikely(ZERO_OR_NULL_PTR(s))) 4602 return s; 4603 4604 ret = slab_alloc_node(s, gfpflags, node, caller, size); 4605 4606 /* Honor the call site pointer we received. 
*/ 4607 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 4608 4609 return ret; 4610 } 4611 EXPORT_SYMBOL(__kmalloc_node_track_caller); 4612 #endif 4613 4614 #ifdef CONFIG_SYSFS 4615 static int count_inuse(struct page *page) 4616 { 4617 return page->inuse; 4618 } 4619 4620 static int count_total(struct page *page) 4621 { 4622 return page->objects; 4623 } 4624 #endif 4625 4626 #ifdef CONFIG_SLUB_DEBUG 4627 static void validate_slab(struct kmem_cache *s, struct page *page) 4628 { 4629 void *p; 4630 void *addr = page_address(page); 4631 unsigned long *map; 4632 4633 slab_lock(page); 4634 4635 if (!check_slab(s, page) || !on_freelist(s, page, NULL)) 4636 goto unlock; 4637 4638 /* Now we know that a valid freelist exists */ 4639 map = get_map(s, page); 4640 for_each_object(p, s, addr, page->objects) { 4641 u8 val = test_bit(__obj_to_index(s, addr, p), map) ? 4642 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 4643 4644 if (!check_object(s, page, p, val)) 4645 break; 4646 } 4647 put_map(map); 4648 unlock: 4649 slab_unlock(page); 4650 } 4651 4652 static int validate_slab_node(struct kmem_cache *s, 4653 struct kmem_cache_node *n) 4654 { 4655 unsigned long count = 0; 4656 struct page *page; 4657 unsigned long flags; 4658 4659 spin_lock_irqsave(&n->list_lock, flags); 4660 4661 list_for_each_entry(page, &n->partial, slab_list) { 4662 validate_slab(s, page); 4663 count++; 4664 } 4665 if (count != n->nr_partial) 4666 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 4667 s->name, count, n->nr_partial); 4668 4669 if (!(s->flags & SLAB_STORE_USER)) 4670 goto out; 4671 4672 list_for_each_entry(page, &n->full, slab_list) { 4673 validate_slab(s, page); 4674 count++; 4675 } 4676 if (count != atomic_long_read(&n->nr_slabs)) 4677 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 4678 s->name, count, atomic_long_read(&n->nr_slabs)); 4679 4680 out: 4681 spin_unlock_irqrestore(&n->list_lock, flags); 4682 return count; 4683 } 4684 4685 static long validate_slab_cache(struct kmem_cache *s) 4686 { 4687 int node; 4688 unsigned long count = 0; 4689 struct kmem_cache_node *n; 4690 4691 flush_all(s); 4692 for_each_kmem_cache_node(s, node, n) 4693 count += validate_slab_node(s, n); 4694 4695 return count; 4696 } 4697 /* 4698 * Generate lists of code addresses where slabcache objects are allocated 4699 * and freed. 
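 *
 * The lists back the alloc_calls/free_calls sysfs files produced by
 * list_locations() below.  A line of that output looks roughly like
 * (numbers are made up):
 *
 *	4297 vm_area_dup+0x41/0x140 age=5/1042/8312 pid=1-2412 cpus=0-3 nodes=0
 *
 * i.e. how many tracked objects trace back to that call site, followed
 * by min/avg/max object age in jiffies, the pid range, and the cpus and
 * nodes involved.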
4700 */ 4701 4702 struct location { 4703 unsigned long count; 4704 unsigned long addr; 4705 long long sum_time; 4706 long min_time; 4707 long max_time; 4708 long min_pid; 4709 long max_pid; 4710 DECLARE_BITMAP(cpus, NR_CPUS); 4711 nodemask_t nodes; 4712 }; 4713 4714 struct loc_track { 4715 unsigned long max; 4716 unsigned long count; 4717 struct location *loc; 4718 }; 4719 4720 static void free_loc_track(struct loc_track *t) 4721 { 4722 if (t->max) 4723 free_pages((unsigned long)t->loc, 4724 get_order(sizeof(struct location) * t->max)); 4725 } 4726 4727 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 4728 { 4729 struct location *l; 4730 int order; 4731 4732 order = get_order(sizeof(struct location) * max); 4733 4734 l = (void *)__get_free_pages(flags, order); 4735 if (!l) 4736 return 0; 4737 4738 if (t->count) { 4739 memcpy(l, t->loc, sizeof(struct location) * t->count); 4740 free_loc_track(t); 4741 } 4742 t->max = max; 4743 t->loc = l; 4744 return 1; 4745 } 4746 4747 static int add_location(struct loc_track *t, struct kmem_cache *s, 4748 const struct track *track) 4749 { 4750 long start, end, pos; 4751 struct location *l; 4752 unsigned long caddr; 4753 unsigned long age = jiffies - track->when; 4754 4755 start = -1; 4756 end = t->count; 4757 4758 for ( ; ; ) { 4759 pos = start + (end - start + 1) / 2; 4760 4761 /* 4762 * There is nothing at "end". If we end up there 4763 * we need to add something to before end. 4764 */ 4765 if (pos == end) 4766 break; 4767 4768 caddr = t->loc[pos].addr; 4769 if (track->addr == caddr) { 4770 4771 l = &t->loc[pos]; 4772 l->count++; 4773 if (track->when) { 4774 l->sum_time += age; 4775 if (age < l->min_time) 4776 l->min_time = age; 4777 if (age > l->max_time) 4778 l->max_time = age; 4779 4780 if (track->pid < l->min_pid) 4781 l->min_pid = track->pid; 4782 if (track->pid > l->max_pid) 4783 l->max_pid = track->pid; 4784 4785 cpumask_set_cpu(track->cpu, 4786 to_cpumask(l->cpus)); 4787 } 4788 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4789 return 1; 4790 } 4791 4792 if (track->addr < caddr) 4793 end = pos; 4794 else 4795 start = pos; 4796 } 4797 4798 /* 4799 * Not found. Insert new tracking element. 
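 *
 * Worked example of the search above: with t->count == 3 and sorted
 * addresses { A, C, E }, looking up a new address D proceeds as
 * start = -1, end = 3 -> pos = 1 (C, D > C, start = 1) -> pos = 2
 * (E, D < E, end = 2) -> pos = 2 == end, so the loop breaks and D is
 * inserted at slot 2, between C and E.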
4800 */ 4801 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 4802 return 0; 4803 4804 l = t->loc + pos; 4805 if (pos < t->count) 4806 memmove(l + 1, l, 4807 (t->count - pos) * sizeof(struct location)); 4808 t->count++; 4809 l->count = 1; 4810 l->addr = track->addr; 4811 l->sum_time = age; 4812 l->min_time = age; 4813 l->max_time = age; 4814 l->min_pid = track->pid; 4815 l->max_pid = track->pid; 4816 cpumask_clear(to_cpumask(l->cpus)); 4817 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 4818 nodes_clear(l->nodes); 4819 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4820 return 1; 4821 } 4822 4823 static void process_slab(struct loc_track *t, struct kmem_cache *s, 4824 struct page *page, enum track_item alloc) 4825 { 4826 void *addr = page_address(page); 4827 void *p; 4828 unsigned long *map; 4829 4830 map = get_map(s, page); 4831 for_each_object(p, s, addr, page->objects) 4832 if (!test_bit(__obj_to_index(s, addr, p), map)) 4833 add_location(t, s, get_track(s, p, alloc)); 4834 put_map(map); 4835 } 4836 4837 static int list_locations(struct kmem_cache *s, char *buf, 4838 enum track_item alloc) 4839 { 4840 int len = 0; 4841 unsigned long i; 4842 struct loc_track t = { 0, 0, NULL }; 4843 int node; 4844 struct kmem_cache_node *n; 4845 4846 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 4847 GFP_KERNEL)) { 4848 return sysfs_emit(buf, "Out of memory\n"); 4849 } 4850 /* Push back cpu slabs */ 4851 flush_all(s); 4852 4853 for_each_kmem_cache_node(s, node, n) { 4854 unsigned long flags; 4855 struct page *page; 4856 4857 if (!atomic_long_read(&n->nr_slabs)) 4858 continue; 4859 4860 spin_lock_irqsave(&n->list_lock, flags); 4861 list_for_each_entry(page, &n->partial, slab_list) 4862 process_slab(&t, s, page, alloc); 4863 list_for_each_entry(page, &n->full, slab_list) 4864 process_slab(&t, s, page, alloc); 4865 spin_unlock_irqrestore(&n->list_lock, flags); 4866 } 4867 4868 for (i = 0; i < t.count; i++) { 4869 struct location *l = &t.loc[i]; 4870 4871 len += sysfs_emit_at(buf, len, "%7ld ", l->count); 4872 4873 if (l->addr) 4874 len += sysfs_emit_at(buf, len, "%pS", (void *)l->addr); 4875 else 4876 len += sysfs_emit_at(buf, len, "<not-available>"); 4877 4878 if (l->sum_time != l->min_time) 4879 len += sysfs_emit_at(buf, len, " age=%ld/%ld/%ld", 4880 l->min_time, 4881 (long)div_u64(l->sum_time, 4882 l->count), 4883 l->max_time); 4884 else 4885 len += sysfs_emit_at(buf, len, " age=%ld", l->min_time); 4886 4887 if (l->min_pid != l->max_pid) 4888 len += sysfs_emit_at(buf, len, " pid=%ld-%ld", 4889 l->min_pid, l->max_pid); 4890 else 4891 len += sysfs_emit_at(buf, len, " pid=%ld", 4892 l->min_pid); 4893 4894 if (num_online_cpus() > 1 && 4895 !cpumask_empty(to_cpumask(l->cpus))) 4896 len += sysfs_emit_at(buf, len, " cpus=%*pbl", 4897 cpumask_pr_args(to_cpumask(l->cpus))); 4898 4899 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 4900 len += sysfs_emit_at(buf, len, " nodes=%*pbl", 4901 nodemask_pr_args(&l->nodes)); 4902 4903 len += sysfs_emit_at(buf, len, "\n"); 4904 } 4905 4906 free_loc_track(&t); 4907 if (!t.count) 4908 len += sysfs_emit_at(buf, len, "No data\n"); 4909 4910 return len; 4911 } 4912 #endif /* CONFIG_SLUB_DEBUG */ 4913 4914 #ifdef SLUB_RESILIENCY_TEST 4915 static void __init resiliency_test(void) 4916 { 4917 u8 *p; 4918 int type = KMALLOC_NORMAL; 4919 4920 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10); 4921 4922 pr_err("SLUB resiliency testing\n"); 4923 pr_err("-----------------------\n"); 4924 pr_err("A. 
Corruption after allocation\n"); 4925 4926 p = kzalloc(16, GFP_KERNEL); 4927 p[16] = 0x12; 4928 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n", 4929 p + 16); 4930 4931 validate_slab_cache(kmalloc_caches[type][4]); 4932 4933 /* Hmmm... The next two are dangerous */ 4934 p = kzalloc(32, GFP_KERNEL); 4935 p[32 + sizeof(void *)] = 0x34; 4936 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n", 4937 p); 4938 pr_err("If allocated object is overwritten then not detectable\n\n"); 4939 4940 validate_slab_cache(kmalloc_caches[type][5]); 4941 p = kzalloc(64, GFP_KERNEL); 4942 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 4943 *p = 0x56; 4944 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 4945 p); 4946 pr_err("If allocated object is overwritten then not detectable\n\n"); 4947 validate_slab_cache(kmalloc_caches[type][6]); 4948 4949 pr_err("\nB. Corruption after free\n"); 4950 p = kzalloc(128, GFP_KERNEL); 4951 kfree(p); 4952 *p = 0x78; 4953 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 4954 validate_slab_cache(kmalloc_caches[type][7]); 4955 4956 p = kzalloc(256, GFP_KERNEL); 4957 kfree(p); 4958 p[50] = 0x9a; 4959 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); 4960 validate_slab_cache(kmalloc_caches[type][8]); 4961 4962 p = kzalloc(512, GFP_KERNEL); 4963 kfree(p); 4964 p[512] = 0xab; 4965 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 4966 validate_slab_cache(kmalloc_caches[type][9]); 4967 } 4968 #else 4969 #ifdef CONFIG_SYSFS 4970 static void resiliency_test(void) {}; 4971 #endif 4972 #endif /* SLUB_RESILIENCY_TEST */ 4973 4974 #ifdef CONFIG_SYSFS 4975 enum slab_stat_type { 4976 SL_ALL, /* All slabs */ 4977 SL_PARTIAL, /* Only partially allocated slabs */ 4978 SL_CPU, /* Only slabs used for cpu caches */ 4979 SL_OBJECTS, /* Determine allocated objects not slabs */ 4980 SL_TOTAL /* Determine object capacity not slabs */ 4981 }; 4982 4983 #define SO_ALL (1 << SL_ALL) 4984 #define SO_PARTIAL (1 << SL_PARTIAL) 4985 #define SO_CPU (1 << SL_CPU) 4986 #define SO_OBJECTS (1 << SL_OBJECTS) 4987 #define SO_TOTAL (1 << SL_TOTAL) 4988 4989 static ssize_t show_slab_objects(struct kmem_cache *s, 4990 char *buf, unsigned long flags) 4991 { 4992 unsigned long total = 0; 4993 int node; 4994 int x; 4995 unsigned long *nodes; 4996 int len = 0; 4997 4998 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 4999 if (!nodes) 5000 return -ENOMEM; 5001 5002 if (flags & SO_CPU) { 5003 int cpu; 5004 5005 for_each_possible_cpu(cpu) { 5006 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 5007 cpu); 5008 int node; 5009 struct page *page; 5010 5011 page = READ_ONCE(c->page); 5012 if (!page) 5013 continue; 5014 5015 node = page_to_nid(page); 5016 if (flags & SO_TOTAL) 5017 x = page->objects; 5018 else if (flags & SO_OBJECTS) 5019 x = page->inuse; 5020 else 5021 x = 1; 5022 5023 total += x; 5024 nodes[node] += x; 5025 5026 page = slub_percpu_partial_read_once(c); 5027 if (page) { 5028 node = page_to_nid(page); 5029 if (flags & SO_TOTAL) 5030 WARN_ON_ONCE(1); 5031 else if (flags & SO_OBJECTS) 5032 WARN_ON_ONCE(1); 5033 else 5034 x = page->pages; 5035 total += x; 5036 nodes[node] += x; 5037 } 5038 } 5039 } 5040 5041 /* 5042 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 5043 * already held which will conflict with an existing lock order: 5044 * 5045 * mem_hotplug_lock->slab_mutex->kernfs_mutex 5046 * 5047 * We don't really need mem_hotplug_lock (to hold off 5048 * 
slab_mem_going_offline_callback) here because slab's memory hot 5049 * unplug code doesn't destroy the kmem_cache->node[] data. 5050 */ 5051 5052 #ifdef CONFIG_SLUB_DEBUG 5053 if (flags & SO_ALL) { 5054 struct kmem_cache_node *n; 5055 5056 for_each_kmem_cache_node(s, node, n) { 5057 5058 if (flags & SO_TOTAL) 5059 x = atomic_long_read(&n->total_objects); 5060 else if (flags & SO_OBJECTS) 5061 x = atomic_long_read(&n->total_objects) - 5062 count_partial(n, count_free); 5063 else 5064 x = atomic_long_read(&n->nr_slabs); 5065 total += x; 5066 nodes[node] += x; 5067 } 5068 5069 } else 5070 #endif 5071 if (flags & SO_PARTIAL) { 5072 struct kmem_cache_node *n; 5073 5074 for_each_kmem_cache_node(s, node, n) { 5075 if (flags & SO_TOTAL) 5076 x = count_partial(n, count_total); 5077 else if (flags & SO_OBJECTS) 5078 x = count_partial(n, count_inuse); 5079 else 5080 x = n->nr_partial; 5081 total += x; 5082 nodes[node] += x; 5083 } 5084 } 5085 5086 len += sysfs_emit_at(buf, len, "%lu", total); 5087 #ifdef CONFIG_NUMA 5088 for (node = 0; node < nr_node_ids; node++) { 5089 if (nodes[node]) 5090 len += sysfs_emit_at(buf, len, " N%d=%lu", 5091 node, nodes[node]); 5092 } 5093 #endif 5094 len += sysfs_emit_at(buf, len, "\n"); 5095 kfree(nodes); 5096 5097 return len; 5098 } 5099 5100 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 5101 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 5102 5103 struct slab_attribute { 5104 struct attribute attr; 5105 ssize_t (*show)(struct kmem_cache *s, char *buf); 5106 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 5107 }; 5108 5109 #define SLAB_ATTR_RO(_name) \ 5110 static struct slab_attribute _name##_attr = \ 5111 __ATTR(_name, 0400, _name##_show, NULL) 5112 5113 #define SLAB_ATTR(_name) \ 5114 static struct slab_attribute _name##_attr = \ 5115 __ATTR(_name, 0600, _name##_show, _name##_store) 5116 5117 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 5118 { 5119 return sysfs_emit(buf, "%u\n", s->size); 5120 } 5121 SLAB_ATTR_RO(slab_size); 5122 5123 static ssize_t align_show(struct kmem_cache *s, char *buf) 5124 { 5125 return sysfs_emit(buf, "%u\n", s->align); 5126 } 5127 SLAB_ATTR_RO(align); 5128 5129 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 5130 { 5131 return sysfs_emit(buf, "%u\n", s->object_size); 5132 } 5133 SLAB_ATTR_RO(object_size); 5134 5135 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 5136 { 5137 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 5138 } 5139 SLAB_ATTR_RO(objs_per_slab); 5140 5141 static ssize_t order_show(struct kmem_cache *s, char *buf) 5142 { 5143 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 5144 } 5145 SLAB_ATTR_RO(order); 5146 5147 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 5148 { 5149 return sysfs_emit(buf, "%lu\n", s->min_partial); 5150 } 5151 5152 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 5153 size_t length) 5154 { 5155 unsigned long min; 5156 int err; 5157 5158 err = kstrtoul(buf, 10, &min); 5159 if (err) 5160 return err; 5161 5162 set_min_partial(s, min); 5163 return length; 5164 } 5165 SLAB_ATTR(min_partial); 5166 5167 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 5168 { 5169 return sysfs_emit(buf, "%u\n", slub_cpu_partial(s)); 5170 } 5171 5172 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 5173 size_t length) 5174 { 5175 unsigned int objects; 5176 int err; 5177 5178 err = kstrtouint(buf, 10, &objects); 5179 if (err) 5180 
return err; 5181 if (objects && !kmem_cache_has_cpu_partial(s)) 5182 return -EINVAL; 5183 5184 slub_set_cpu_partial(s, objects); 5185 flush_all(s); 5186 return length; 5187 } 5188 SLAB_ATTR(cpu_partial); 5189 5190 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 5191 { 5192 if (!s->ctor) 5193 return 0; 5194 return sysfs_emit(buf, "%pS\n", s->ctor); 5195 } 5196 SLAB_ATTR_RO(ctor); 5197 5198 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 5199 { 5200 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 5201 } 5202 SLAB_ATTR_RO(aliases); 5203 5204 static ssize_t partial_show(struct kmem_cache *s, char *buf) 5205 { 5206 return show_slab_objects(s, buf, SO_PARTIAL); 5207 } 5208 SLAB_ATTR_RO(partial); 5209 5210 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 5211 { 5212 return show_slab_objects(s, buf, SO_CPU); 5213 } 5214 SLAB_ATTR_RO(cpu_slabs); 5215 5216 static ssize_t objects_show(struct kmem_cache *s, char *buf) 5217 { 5218 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 5219 } 5220 SLAB_ATTR_RO(objects); 5221 5222 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 5223 { 5224 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 5225 } 5226 SLAB_ATTR_RO(objects_partial); 5227 5228 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 5229 { 5230 int objects = 0; 5231 int pages = 0; 5232 int cpu; 5233 int len = 0; 5234 5235 for_each_online_cpu(cpu) { 5236 struct page *page; 5237 5238 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5239 5240 if (page) { 5241 pages += page->pages; 5242 objects += page->pobjects; 5243 } 5244 } 5245 5246 len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages); 5247 5248 #ifdef CONFIG_SMP 5249 for_each_online_cpu(cpu) { 5250 struct page *page; 5251 5252 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5253 if (page) 5254 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 5255 cpu, page->pobjects, page->pages); 5256 } 5257 #endif 5258 len += sysfs_emit_at(buf, len, "\n"); 5259 5260 return len; 5261 } 5262 SLAB_ATTR_RO(slabs_cpu_partial); 5263 5264 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 5265 { 5266 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 5267 } 5268 SLAB_ATTR_RO(reclaim_account); 5269 5270 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 5271 { 5272 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 5273 } 5274 SLAB_ATTR_RO(hwcache_align); 5275 5276 #ifdef CONFIG_ZONE_DMA 5277 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 5278 { 5279 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 5280 } 5281 SLAB_ATTR_RO(cache_dma); 5282 #endif 5283 5284 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 5285 { 5286 return sysfs_emit(buf, "%u\n", s->usersize); 5287 } 5288 SLAB_ATTR_RO(usersize); 5289 5290 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 5291 { 5292 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 5293 } 5294 SLAB_ATTR_RO(destroy_by_rcu); 5295 5296 #ifdef CONFIG_SLUB_DEBUG 5297 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 5298 { 5299 return show_slab_objects(s, buf, SO_ALL); 5300 } 5301 SLAB_ATTR_RO(slabs); 5302 5303 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 5304 { 5305 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 5306 } 5307 SLAB_ATTR_RO(total_objects); 5308 5309 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 5310 
{ 5311 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 5312 } 5313 SLAB_ATTR_RO(sanity_checks); 5314 5315 static ssize_t trace_show(struct kmem_cache *s, char *buf) 5316 { 5317 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 5318 } 5319 SLAB_ATTR_RO(trace); 5320 5321 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 5322 { 5323 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 5324 } 5325 5326 SLAB_ATTR_RO(red_zone); 5327 5328 static ssize_t poison_show(struct kmem_cache *s, char *buf) 5329 { 5330 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 5331 } 5332 5333 SLAB_ATTR_RO(poison); 5334 5335 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 5336 { 5337 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 5338 } 5339 5340 SLAB_ATTR_RO(store_user); 5341 5342 static ssize_t validate_show(struct kmem_cache *s, char *buf) 5343 { 5344 return 0; 5345 } 5346 5347 static ssize_t validate_store(struct kmem_cache *s, 5348 const char *buf, size_t length) 5349 { 5350 int ret = -EINVAL; 5351 5352 if (buf[0] == '1') { 5353 ret = validate_slab_cache(s); 5354 if (ret >= 0) 5355 ret = length; 5356 } 5357 return ret; 5358 } 5359 SLAB_ATTR(validate); 5360 5361 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 5362 { 5363 if (!(s->flags & SLAB_STORE_USER)) 5364 return -ENOSYS; 5365 return list_locations(s, buf, TRACK_ALLOC); 5366 } 5367 SLAB_ATTR_RO(alloc_calls); 5368 5369 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 5370 { 5371 if (!(s->flags & SLAB_STORE_USER)) 5372 return -ENOSYS; 5373 return list_locations(s, buf, TRACK_FREE); 5374 } 5375 SLAB_ATTR_RO(free_calls); 5376 #endif /* CONFIG_SLUB_DEBUG */ 5377 5378 #ifdef CONFIG_FAILSLAB 5379 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 5380 { 5381 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 5382 } 5383 SLAB_ATTR_RO(failslab); 5384 #endif 5385 5386 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 5387 { 5388 return 0; 5389 } 5390 5391 static ssize_t shrink_store(struct kmem_cache *s, 5392 const char *buf, size_t length) 5393 { 5394 if (buf[0] == '1') 5395 kmem_cache_shrink(s); 5396 else 5397 return -EINVAL; 5398 return length; 5399 } 5400 SLAB_ATTR(shrink); 5401 5402 #ifdef CONFIG_NUMA 5403 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 5404 { 5405 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 5406 } 5407 5408 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 5409 const char *buf, size_t length) 5410 { 5411 unsigned int ratio; 5412 int err; 5413 5414 err = kstrtouint(buf, 10, &ratio); 5415 if (err) 5416 return err; 5417 if (ratio > 100) 5418 return -ERANGE; 5419 5420 s->remote_node_defrag_ratio = ratio * 10; 5421 5422 return length; 5423 } 5424 SLAB_ATTR(remote_node_defrag_ratio); 5425 #endif 5426 5427 #ifdef CONFIG_SLUB_STATS 5428 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 5429 { 5430 unsigned long sum = 0; 5431 int cpu; 5432 int len = 0; 5433 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 5434 5435 if (!data) 5436 return -ENOMEM; 5437 5438 for_each_online_cpu(cpu) { 5439 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 5440 5441 data[cpu] = x; 5442 sum += x; 5443 } 5444 5445 len += sysfs_emit_at(buf, len, "%lu", sum); 5446 5447 #ifdef CONFIG_SMP 5448 for_each_online_cpu(cpu) { 5449 if (data[cpu]) 5450 len += sysfs_emit_at(buf, len, " C%d=%u", 5451 cpu, data[cpu]); 
5452 } 5453 #endif 5454 kfree(data); 5455 len += sysfs_emit_at(buf, len, "\n"); 5456 5457 return len; 5458 } 5459 5460 static void clear_stat(struct kmem_cache *s, enum stat_item si) 5461 { 5462 int cpu; 5463 5464 for_each_online_cpu(cpu) 5465 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 5466 } 5467 5468 #define STAT_ATTR(si, text) \ 5469 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 5470 { \ 5471 return show_stat(s, buf, si); \ 5472 } \ 5473 static ssize_t text##_store(struct kmem_cache *s, \ 5474 const char *buf, size_t length) \ 5475 { \ 5476 if (buf[0] != '0') \ 5477 return -EINVAL; \ 5478 clear_stat(s, si); \ 5479 return length; \ 5480 } \ 5481 SLAB_ATTR(text); \ 5482 5483 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 5484 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 5485 STAT_ATTR(FREE_FASTPATH, free_fastpath); 5486 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 5487 STAT_ATTR(FREE_FROZEN, free_frozen); 5488 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 5489 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 5490 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 5491 STAT_ATTR(ALLOC_SLAB, alloc_slab); 5492 STAT_ATTR(ALLOC_REFILL, alloc_refill); 5493 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 5494 STAT_ATTR(FREE_SLAB, free_slab); 5495 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 5496 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 5497 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 5498 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 5499 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 5500 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 5501 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 5502 STAT_ATTR(ORDER_FALLBACK, order_fallback); 5503 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 5504 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5505 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5506 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5507 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 5508 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 5509 #endif /* CONFIG_SLUB_STATS */ 5510 5511 static struct attribute *slab_attrs[] = { 5512 &slab_size_attr.attr, 5513 &object_size_attr.attr, 5514 &objs_per_slab_attr.attr, 5515 &order_attr.attr, 5516 &min_partial_attr.attr, 5517 &cpu_partial_attr.attr, 5518 &objects_attr.attr, 5519 &objects_partial_attr.attr, 5520 &partial_attr.attr, 5521 &cpu_slabs_attr.attr, 5522 &ctor_attr.attr, 5523 &aliases_attr.attr, 5524 &align_attr.attr, 5525 &hwcache_align_attr.attr, 5526 &reclaim_account_attr.attr, 5527 &destroy_by_rcu_attr.attr, 5528 &shrink_attr.attr, 5529 &slabs_cpu_partial_attr.attr, 5530 #ifdef CONFIG_SLUB_DEBUG 5531 &total_objects_attr.attr, 5532 &slabs_attr.attr, 5533 &sanity_checks_attr.attr, 5534 &trace_attr.attr, 5535 &red_zone_attr.attr, 5536 &poison_attr.attr, 5537 &store_user_attr.attr, 5538 &validate_attr.attr, 5539 &alloc_calls_attr.attr, 5540 &free_calls_attr.attr, 5541 #endif 5542 #ifdef CONFIG_ZONE_DMA 5543 &cache_dma_attr.attr, 5544 #endif 5545 #ifdef CONFIG_NUMA 5546 &remote_node_defrag_ratio_attr.attr, 5547 #endif 5548 #ifdef CONFIG_SLUB_STATS 5549 &alloc_fastpath_attr.attr, 5550 &alloc_slowpath_attr.attr, 5551 &free_fastpath_attr.attr, 5552 &free_slowpath_attr.attr, 5553 &free_frozen_attr.attr, 5554 &free_add_partial_attr.attr, 5555 &free_remove_partial_attr.attr, 5556 &alloc_from_partial_attr.attr, 5557 &alloc_slab_attr.attr, 5558 &alloc_refill_attr.attr, 5559 &alloc_node_mismatch_attr.attr, 5560 &free_slab_attr.attr, 5561 &cpuslab_flush_attr.attr, 5562 
&deactivate_full_attr.attr, 5563 &deactivate_empty_attr.attr, 5564 &deactivate_to_head_attr.attr, 5565 &deactivate_to_tail_attr.attr, 5566 &deactivate_remote_frees_attr.attr, 5567 &deactivate_bypass_attr.attr, 5568 &order_fallback_attr.attr, 5569 &cmpxchg_double_fail_attr.attr, 5570 &cmpxchg_double_cpu_fail_attr.attr, 5571 &cpu_partial_alloc_attr.attr, 5572 &cpu_partial_free_attr.attr, 5573 &cpu_partial_node_attr.attr, 5574 &cpu_partial_drain_attr.attr, 5575 #endif 5576 #ifdef CONFIG_FAILSLAB 5577 &failslab_attr.attr, 5578 #endif 5579 &usersize_attr.attr, 5580 5581 NULL 5582 }; 5583 5584 static const struct attribute_group slab_attr_group = { 5585 .attrs = slab_attrs, 5586 }; 5587 5588 static ssize_t slab_attr_show(struct kobject *kobj, 5589 struct attribute *attr, 5590 char *buf) 5591 { 5592 struct slab_attribute *attribute; 5593 struct kmem_cache *s; 5594 int err; 5595 5596 attribute = to_slab_attr(attr); 5597 s = to_slab(kobj); 5598 5599 if (!attribute->show) 5600 return -EIO; 5601 5602 err = attribute->show(s, buf); 5603 5604 return err; 5605 } 5606 5607 static ssize_t slab_attr_store(struct kobject *kobj, 5608 struct attribute *attr, 5609 const char *buf, size_t len) 5610 { 5611 struct slab_attribute *attribute; 5612 struct kmem_cache *s; 5613 int err; 5614 5615 attribute = to_slab_attr(attr); 5616 s = to_slab(kobj); 5617 5618 if (!attribute->store) 5619 return -EIO; 5620 5621 err = attribute->store(s, buf, len); 5622 return err; 5623 } 5624 5625 static void kmem_cache_release(struct kobject *k) 5626 { 5627 slab_kmem_cache_release(to_slab(k)); 5628 } 5629 5630 static const struct sysfs_ops slab_sysfs_ops = { 5631 .show = slab_attr_show, 5632 .store = slab_attr_store, 5633 }; 5634 5635 static struct kobj_type slab_ktype = { 5636 .sysfs_ops = &slab_sysfs_ops, 5637 .release = kmem_cache_release, 5638 }; 5639 5640 static struct kset *slab_kset; 5641 5642 static inline struct kset *cache_kset(struct kmem_cache *s) 5643 { 5644 return slab_kset; 5645 } 5646 5647 #define ID_STR_LENGTH 64 5648 5649 /* Create a unique string id for a slab cache: 5650 * 5651 * Format :[flags-]size 5652 */ 5653 static char *create_unique_id(struct kmem_cache *s) 5654 { 5655 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 5656 char *p = name; 5657 5658 BUG_ON(!name); 5659 5660 *p++ = ':'; 5661 /* 5662 * First flags affecting slabcache operations. We will only 5663 * get here for aliasable slabs so we do not need to support 5664 * too many flags. The flags here must cover all flags that 5665 * are matched during merging to guarantee that the id is 5666 * unique. 5667 */ 5668 if (s->flags & SLAB_CACHE_DMA) 5669 *p++ = 'd'; 5670 if (s->flags & SLAB_CACHE_DMA32) 5671 *p++ = 'D'; 5672 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5673 *p++ = 'a'; 5674 if (s->flags & SLAB_CONSISTENCY_CHECKS) 5675 *p++ = 'F'; 5676 if (s->flags & SLAB_ACCOUNT) 5677 *p++ = 'A'; 5678 if (p != name + 1) 5679 *p++ = '-'; 5680 p += sprintf(p, "%07u", s->size); 5681 5682 BUG_ON(p > name + ID_STR_LENGTH - 1); 5683 return name; 5684 } 5685 5686 static int sysfs_slab_add(struct kmem_cache *s) 5687 { 5688 int err; 5689 const char *name; 5690 struct kset *kset = cache_kset(s); 5691 int unmergeable = slab_unmergeable(s); 5692 5693 if (!kset) { 5694 kobject_init(&s->kobj, &slab_ktype); 5695 return 0; 5696 } 5697 5698 if (!unmergeable && disable_higher_order_debug && 5699 (slub_debug & DEBUG_METADATA_FLAGS)) 5700 unmergeable = 1; 5701 5702 if (unmergeable) { 5703 /* 5704 * Slabcache can never be merged so we can use the name proper. 
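 * (A mergeable cache instead gets a generated id from
 * create_unique_id() above, e.g. ":da-0000192" for a 192-byte
 * SLAB_CACHE_DMA + SLAB_RECLAIM_ACCOUNT cache, and its human-readable
 * names become symlinks to that entry.)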
5705 * This is typically the case for debug situations. In that 5706 * case we can catch duplicate names easily. 5707 */ 5708 sysfs_remove_link(&slab_kset->kobj, s->name); 5709 name = s->name; 5710 } else { 5711 /* 5712 * Create a unique name for the slab as a target 5713 * for the symlinks. 5714 */ 5715 name = create_unique_id(s); 5716 } 5717 5718 s->kobj.kset = kset; 5719 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 5720 if (err) 5721 goto out; 5722 5723 err = sysfs_create_group(&s->kobj, &slab_attr_group); 5724 if (err) 5725 goto out_del_kobj; 5726 5727 if (!unmergeable) { 5728 /* Setup first alias */ 5729 sysfs_slab_alias(s, s->name); 5730 } 5731 out: 5732 if (!unmergeable) 5733 kfree(name); 5734 return err; 5735 out_del_kobj: 5736 kobject_del(&s->kobj); 5737 goto out; 5738 } 5739 5740 void sysfs_slab_unlink(struct kmem_cache *s) 5741 { 5742 if (slab_state >= FULL) 5743 kobject_del(&s->kobj); 5744 } 5745 5746 void sysfs_slab_release(struct kmem_cache *s) 5747 { 5748 if (slab_state >= FULL) 5749 kobject_put(&s->kobj); 5750 } 5751 5752 /* 5753 * Need to buffer aliases during bootup until sysfs becomes 5754 * available lest we lose that information. 5755 */ 5756 struct saved_alias { 5757 struct kmem_cache *s; 5758 const char *name; 5759 struct saved_alias *next; 5760 }; 5761 5762 static struct saved_alias *alias_list; 5763 5764 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 5765 { 5766 struct saved_alias *al; 5767 5768 if (slab_state == FULL) { 5769 /* 5770 * If we have a leftover link then remove it. 5771 */ 5772 sysfs_remove_link(&slab_kset->kobj, name); 5773 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 5774 } 5775 5776 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 5777 if (!al) 5778 return -ENOMEM; 5779 5780 al->s = s; 5781 al->name = name; 5782 al->next = alias_list; 5783 alias_list = al; 5784 return 0; 5785 } 5786 5787 static int __init slab_sysfs_init(void) 5788 { 5789 struct kmem_cache *s; 5790 int err; 5791 5792 mutex_lock(&slab_mutex); 5793 5794 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 5795 if (!slab_kset) { 5796 mutex_unlock(&slab_mutex); 5797 pr_err("Cannot register slab subsystem.\n"); 5798 return -ENOSYS; 5799 } 5800 5801 slab_state = FULL; 5802 5803 list_for_each_entry(s, &slab_caches, list) { 5804 err = sysfs_slab_add(s); 5805 if (err) 5806 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 5807 s->name); 5808 } 5809 5810 while (alias_list) { 5811 struct saved_alias *al = alias_list; 5812 5813 alias_list = alias_list->next; 5814 err = sysfs_slab_alias(al->s, al->name); 5815 if (err) 5816 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 5817 al->name); 5818 kfree(al); 5819 } 5820 5821 mutex_unlock(&slab_mutex); 5822 resiliency_test(); 5823 return 0; 5824 } 5825 5826 __initcall(slab_sysfs_init); 5827 #endif /* CONFIG_SYSFS */ 5828 5829 /* 5830 * The /proc/slabinfo ABI 5831 */ 5832 #ifdef CONFIG_SLUB_DEBUG 5833 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 5834 { 5835 unsigned long nr_slabs = 0; 5836 unsigned long nr_objs = 0; 5837 unsigned long nr_free = 0; 5838 int node; 5839 struct kmem_cache_node *n; 5840 5841 for_each_kmem_cache_node(s, node, n) { 5842 nr_slabs += node_nr_slabs(n); 5843 nr_objs += node_nr_objs(n); 5844 nr_free += count_partial(n, count_free); 5845 } 5846 5847 sinfo->active_objs = nr_objs - nr_free; 5848 sinfo->num_objs = nr_objs; 5849 sinfo->active_slabs = nr_slabs; 5850 sinfo->num_slabs = nr_slabs; 5851 sinfo->objects_per_slab = 
oo_objects(s->oo); 5852 sinfo->cache_order = oo_order(s->oo); 5853 } 5854 5855 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) 5856 { 5857 } 5858 5859 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 5860 size_t count, loff_t *ppos) 5861 { 5862 return -EIO; 5863 } 5864 #endif /* CONFIG_SLUB_DEBUG */ 5865
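
/*
 * Worked example for get_slabinfo() above (illustrative numbers): with
 * nr_objs = 640, nr_free = 40 and nr_slabs = 20 summed across nodes,
 * the cache is reported with active_objs = 600 and num_objs = 640,
 * while objects_per_slab and cache_order come straight from s->oo.
 */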