// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* mm_account_reclaimed_pages() */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/sort.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
 *   4. slab_lock(slab) (Only on some arches)
 *   5. object_map_lock (Only for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *   Also synchronizes memory hotplug callbacks.
 *
 *   slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
 *
 *   The slab_lock is only used on arches that do not have the ability
 *   to do a cmpxchg_double. It only protects:
 *
 *	A. slab->freelist	-> List of free objects in a slab
 *	B. slab->inuse		-> Number of objects in use
 *	C. slab->objects	-> Number of objects in slab
 *	D. slab->frozen		-> frozen state
 *
 *   Frozen slabs
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list except per cpu partial list. The processor that froze the
 *   slab is the one who can perform list operations on the slab. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   slab's freelist.
 *
 *   list_lock
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added to
 *   or removed from the lists, nor may the number of partial slabs be
 *   modified. (Note that the total number of slabs is an atomic value that
 *   may be modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   For debug caches, all allocations are forced to go through a list_lock
 *   protected region to serialize against concurrent validation.
 *
 *   cpu_slab->lock local lock
 *
 *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters. This is a percpu structure manipulated only by
 *   the local cpu, so the lock protects against being preempted or
 *   interrupted by an irq. Fast path operations rely on lockless operations
 *   instead.
 *
 *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption
 *   which means the lockless fastpath cannot be used as it might interfere
 *   with an in-progress slow path operation. In this case the local lock is
 *   always taken but it still utilizes the freelist for the common
 *   operations.
 *
 *   lockless fastpaths
 *
 *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 *   are fully lockless when satisfied from the percpu slab (and when
 *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
 *   They also don't disable preemption or migration or irqs. They rely on
 *   the transaction id (tid) field to detect being preempted or moved to
 *   another cpu (a simplified sketch follows at the end of this comment).
 *
 *   irq, preemption, migration considerations
 *
 *   Interrupts are disabled as part of list_lock or local_lock operations, or
 *   around the slab_lock operation, in order to make the slab allocator safe
 *   to use in the context of an irq.
 *
 *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu
 *   pointer doesn't have to be revalidated in each section protected by the
 *   local lock.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup are
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * slab->frozen		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
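 *
 * As an illustration only (an editor's simplified sketch, not the literal
 * code of slab_alloc_node()), the tid-protected lockless allocation
 * fastpath is conceptually:
 *
 *	c = raw_cpu_ptr(s->cpu_slab);
 *	tid = READ_ONCE(c->tid);
 *	object = c->freelist;
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     get_freepointer(s, object), next_tid(tid)))
 *		goto redo;	/* freelist or tid changed under us */
 *
 * The cmpxchg of the (freelist, tid) pair can only succeed if neither
 * changed, i.e. the task was not preempted, migrated or interrupted by an
 * allocation from this cpu slab in between, which is what makes the path
 * safe without disabling irqs, preemption or migration.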
167 */ 168 169 /* 170 * We could simply use migrate_disable()/enable() but as long as it's a 171 * function call even on !PREEMPT_RT, use inline preempt_disable() there. 172 */ 173 #ifndef CONFIG_PREEMPT_RT 174 #define slub_get_cpu_ptr(var) get_cpu_ptr(var) 175 #define slub_put_cpu_ptr(var) put_cpu_ptr(var) 176 #define USE_LOCKLESS_FAST_PATH() (true) 177 #else 178 #define slub_get_cpu_ptr(var) \ 179 ({ \ 180 migrate_disable(); \ 181 this_cpu_ptr(var); \ 182 }) 183 #define slub_put_cpu_ptr(var) \ 184 do { \ 185 (void)(var); \ 186 migrate_enable(); \ 187 } while (0) 188 #define USE_LOCKLESS_FAST_PATH() (false) 189 #endif 190 191 #ifndef CONFIG_SLUB_TINY 192 #define __fastpath_inline __always_inline 193 #else 194 #define __fastpath_inline 195 #endif 196 197 #ifdef CONFIG_SLUB_DEBUG 198 #ifdef CONFIG_SLUB_DEBUG_ON 199 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled); 200 #else 201 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled); 202 #endif 203 #endif /* CONFIG_SLUB_DEBUG */ 204 205 /* Structure holding parameters for get_partial() call chain */ 206 struct partial_context { 207 struct slab **slab; 208 gfp_t flags; 209 unsigned int orig_size; 210 }; 211 212 static inline bool kmem_cache_debug(struct kmem_cache *s) 213 { 214 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); 215 } 216 217 static inline bool slub_debug_orig_size(struct kmem_cache *s) 218 { 219 return (kmem_cache_debug_flags(s, SLAB_STORE_USER) && 220 (s->flags & SLAB_KMALLOC)); 221 } 222 223 void *fixup_red_left(struct kmem_cache *s, void *p) 224 { 225 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) 226 p += s->red_left_pad; 227 228 return p; 229 } 230 231 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) 232 { 233 #ifdef CONFIG_SLUB_CPU_PARTIAL 234 return !kmem_cache_debug(s); 235 #else 236 return false; 237 #endif 238 } 239 240 /* 241 * Issues still to be resolved: 242 * 243 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 244 * 245 * - Variable sizing of the per node arrays 246 */ 247 248 /* Enable to log cmpxchg failures */ 249 #undef SLUB_DEBUG_CMPXCHG 250 251 #ifndef CONFIG_SLUB_TINY 252 /* 253 * Minimum number of partial slabs. These will be left on the partial 254 * lists even if they are empty. kmem_cache_shrink may reclaim them. 255 */ 256 #define MIN_PARTIAL 5 257 258 /* 259 * Maximum number of desirable partial slabs. 260 * The existence of more partial slabs makes kmem_cache_shrink 261 * sort the partial list by the number of objects in use. 262 */ 263 #define MAX_PARTIAL 10 264 #else 265 #define MIN_PARTIAL 0 266 #define MAX_PARTIAL 0 267 #endif 268 269 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \ 270 SLAB_POISON | SLAB_STORE_USER) 271 272 /* 273 * These debug flags cannot use CMPXCHG because there might be consistency 274 * issues when checking or reading debug information 275 */ 276 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \ 277 SLAB_TRACE) 278 279 280 /* 281 * Debugging flags that require metadata to be stored in the slab. These get 282 * disabled when slub_debug=O is used and a cache's min order increases with 283 * metadata. 
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */

#ifdef system_has_freelist_aba
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
#else
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0U)
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef SLAB_SUPPORTS_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

#ifndef CONFIG_SLUB_TINY
/*
 * Workqueue used for flush_cpu_slab().
 */
static struct workqueue_struct *flushwq;
#endif

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when free_slab() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
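	 *
	 * As a worked illustration of the expression below (editor's note):
	 * the stored value is
	 *
	 *	stored = ptr ^ s->random ^ swab(ptr_addr)
	 *
	 * and since XOR is its own inverse, feeding the stored value back
	 * through the same expression with the same ptr_addr recovers the
	 * original pointer, which is what freelist_dereference() relies on.
	 * Leaking the stored value therefore does not directly reveal the
	 * plain freelist pointer.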
381 */ 382 return (void *)((unsigned long)ptr ^ s->random ^ 383 swab((unsigned long)kasan_reset_tag((void *)ptr_addr))); 384 #else 385 return ptr; 386 #endif 387 } 388 389 /* Returns the freelist pointer recorded at location ptr_addr. */ 390 static inline void *freelist_dereference(const struct kmem_cache *s, 391 void *ptr_addr) 392 { 393 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr), 394 (unsigned long)ptr_addr); 395 } 396 397 static inline void *get_freepointer(struct kmem_cache *s, void *object) 398 { 399 object = kasan_reset_tag(object); 400 return freelist_dereference(s, object + s->offset); 401 } 402 403 #ifndef CONFIG_SLUB_TINY 404 static void prefetch_freepointer(const struct kmem_cache *s, void *object) 405 { 406 prefetchw(object + s->offset); 407 } 408 #endif 409 410 /* 411 * When running under KMSAN, get_freepointer_safe() may return an uninitialized 412 * pointer value in the case the current thread loses the race for the next 413 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in 414 * slab_alloc_node() will fail, so the uninitialized value won't be used, but 415 * KMSAN will still check all arguments of cmpxchg because of imperfect 416 * handling of inline assembly. 417 * To work around this problem, we apply __no_kmsan_checks to ensure that 418 * get_freepointer_safe() returns initialized memory. 419 */ 420 __no_kmsan_checks 421 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) 422 { 423 unsigned long freepointer_addr; 424 void *p; 425 426 if (!debug_pagealloc_enabled_static()) 427 return get_freepointer(s, object); 428 429 object = kasan_reset_tag(object); 430 freepointer_addr = (unsigned long)object + s->offset; 431 copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p)); 432 return freelist_ptr(s, p, freepointer_addr); 433 } 434 435 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 436 { 437 unsigned long freeptr_addr = (unsigned long)object + s->offset; 438 439 #ifdef CONFIG_SLAB_FREELIST_HARDENED 440 BUG_ON(object == fp); /* naive detection of double free or corruption */ 441 #endif 442 443 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr); 444 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); 445 } 446 447 /* Loop over all objects in a slab */ 448 #define for_each_object(__p, __s, __addr, __objects) \ 449 for (__p = fixup_red_left(__s, __addr); \ 450 __p < (__addr) + (__objects) * (__s)->size; \ 451 __p += (__s)->size) 452 453 static inline unsigned int order_objects(unsigned int order, unsigned int size) 454 { 455 return ((unsigned int)PAGE_SIZE << order) / size; 456 } 457 458 static inline struct kmem_cache_order_objects oo_make(unsigned int order, 459 unsigned int size) 460 { 461 struct kmem_cache_order_objects x = { 462 (order << OO_SHIFT) + order_objects(order, size) 463 }; 464 465 return x; 466 } 467 468 static inline unsigned int oo_order(struct kmem_cache_order_objects x) 469 { 470 return x.x >> OO_SHIFT; 471 } 472 473 static inline unsigned int oo_objects(struct kmem_cache_order_objects x) 474 { 475 return x.x & OO_MASK; 476 } 477 478 #ifdef CONFIG_SLUB_CPU_PARTIAL 479 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) 480 { 481 unsigned int nr_slabs; 482 483 s->cpu_partial = nr_objects; 484 485 /* 486 * We take the number of objects but actually limit the number of 487 * slabs on the per cpu partial list, in order to limit excessive 488 * growth of the list. 
For simplicity we assume that the slabs will 489 * be half-full. 490 */ 491 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo)); 492 s->cpu_partial_slabs = nr_slabs; 493 } 494 #else 495 static inline void 496 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) 497 { 498 } 499 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 500 501 /* 502 * Per slab locking using the pagelock 503 */ 504 static __always_inline void slab_lock(struct slab *slab) 505 { 506 struct page *page = slab_page(slab); 507 508 VM_BUG_ON_PAGE(PageTail(page), page); 509 bit_spin_lock(PG_locked, &page->flags); 510 } 511 512 static __always_inline void slab_unlock(struct slab *slab) 513 { 514 struct page *page = slab_page(slab); 515 516 VM_BUG_ON_PAGE(PageTail(page), page); 517 __bit_spin_unlock(PG_locked, &page->flags); 518 } 519 520 static inline bool 521 __update_freelist_fast(struct slab *slab, 522 void *freelist_old, unsigned long counters_old, 523 void *freelist_new, unsigned long counters_new) 524 { 525 #ifdef system_has_freelist_aba 526 freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old }; 527 freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new }; 528 529 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); 530 #else 531 return false; 532 #endif 533 } 534 535 static inline bool 536 __update_freelist_slow(struct slab *slab, 537 void *freelist_old, unsigned long counters_old, 538 void *freelist_new, unsigned long counters_new) 539 { 540 bool ret = false; 541 542 slab_lock(slab); 543 if (slab->freelist == freelist_old && 544 slab->counters == counters_old) { 545 slab->freelist = freelist_new; 546 slab->counters = counters_new; 547 ret = true; 548 } 549 slab_unlock(slab); 550 551 return ret; 552 } 553 554 /* 555 * Interrupts must be disabled (for the fallback code to work right), typically 556 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is 557 * part of bit_spin_lock(), is sufficient because the policy is not to allow any 558 * allocation/ free operation in hardirq context. Therefore nothing can 559 * interrupt the operation. 
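 *
 * As a rough caller-side sketch (editor's illustration, not a quote of any
 * particular caller), the expected pattern is:
 *
 *	spin_lock_irqsave(&n->list_lock, flags);	/* or another _irqsave variant */
 *	__slab_update_freelist(s, slab, old_freelist, old_counters,
 *			       new_freelist, new_counters, "sketch");
 *	spin_unlock_irqrestore(&n->list_lock, flags);
 *
 * slab_update_freelist() further down disables interrupts itself around the
 * slab_lock() fallback and so may also be called with interrupts enabled.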
560 */ 561 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab, 562 void *freelist_old, unsigned long counters_old, 563 void *freelist_new, unsigned long counters_new, 564 const char *n) 565 { 566 bool ret; 567 568 if (USE_LOCKLESS_FAST_PATH()) 569 lockdep_assert_irqs_disabled(); 570 571 if (s->flags & __CMPXCHG_DOUBLE) { 572 ret = __update_freelist_fast(slab, freelist_old, counters_old, 573 freelist_new, counters_new); 574 } else { 575 ret = __update_freelist_slow(slab, freelist_old, counters_old, 576 freelist_new, counters_new); 577 } 578 if (likely(ret)) 579 return true; 580 581 cpu_relax(); 582 stat(s, CMPXCHG_DOUBLE_FAIL); 583 584 #ifdef SLUB_DEBUG_CMPXCHG 585 pr_info("%s %s: cmpxchg double redo ", n, s->name); 586 #endif 587 588 return false; 589 } 590 591 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, 592 void *freelist_old, unsigned long counters_old, 593 void *freelist_new, unsigned long counters_new, 594 const char *n) 595 { 596 bool ret; 597 598 if (s->flags & __CMPXCHG_DOUBLE) { 599 ret = __update_freelist_fast(slab, freelist_old, counters_old, 600 freelist_new, counters_new); 601 } else { 602 unsigned long flags; 603 604 local_irq_save(flags); 605 ret = __update_freelist_slow(slab, freelist_old, counters_old, 606 freelist_new, counters_new); 607 local_irq_restore(flags); 608 } 609 if (likely(ret)) 610 return true; 611 612 cpu_relax(); 613 stat(s, CMPXCHG_DOUBLE_FAIL); 614 615 #ifdef SLUB_DEBUG_CMPXCHG 616 pr_info("%s %s: cmpxchg double redo ", n, s->name); 617 #endif 618 619 return false; 620 } 621 622 #ifdef CONFIG_SLUB_DEBUG 623 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; 624 static DEFINE_SPINLOCK(object_map_lock); 625 626 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s, 627 struct slab *slab) 628 { 629 void *addr = slab_address(slab); 630 void *p; 631 632 bitmap_zero(obj_map, slab->objects); 633 634 for (p = slab->freelist; p; p = get_freepointer(s, p)) 635 set_bit(__obj_to_index(s, addr, p), obj_map); 636 } 637 638 #if IS_ENABLED(CONFIG_KUNIT) 639 static bool slab_add_kunit_errors(void) 640 { 641 struct kunit_resource *resource; 642 643 if (!kunit_get_current_test()) 644 return false; 645 646 resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); 647 if (!resource) 648 return false; 649 650 (*(int *)resource->data)++; 651 kunit_put_resource(resource); 652 return true; 653 } 654 #else 655 static inline bool slab_add_kunit_errors(void) { return false; } 656 #endif 657 658 static inline unsigned int size_from_object(struct kmem_cache *s) 659 { 660 if (s->flags & SLAB_RED_ZONE) 661 return s->size - s->red_left_pad; 662 663 return s->size; 664 } 665 666 static inline void *restore_red_left(struct kmem_cache *s, void *p) 667 { 668 if (s->flags & SLAB_RED_ZONE) 669 p -= s->red_left_pad; 670 671 return p; 672 } 673 674 /* 675 * Debug settings: 676 */ 677 #if defined(CONFIG_SLUB_DEBUG_ON) 678 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; 679 #else 680 static slab_flags_t slub_debug; 681 #endif 682 683 static char *slub_debug_string; 684 static int disable_higher_order_debug; 685 686 /* 687 * slub is about to manipulate internal object metadata. This memory lies 688 * outside the range of the allocated object, so accessing it would normally 689 * be reported by kasan as a bounds error. metadata_access_enable() is used 690 * to tell kasan that these accesses are OK. 
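 *
 * Typical usage, as in print_section() and check_bytes_and_report() below:
 *
 *	metadata_access_enable();
 *	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
 *	metadata_access_disable();
 *
 * i.e. the window with KASAN checking disabled is kept as small as possible.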
691 */ 692 static inline void metadata_access_enable(void) 693 { 694 kasan_disable_current(); 695 } 696 697 static inline void metadata_access_disable(void) 698 { 699 kasan_enable_current(); 700 } 701 702 /* 703 * Object debugging 704 */ 705 706 /* Verify that a pointer has an address that is valid within a slab page */ 707 static inline int check_valid_pointer(struct kmem_cache *s, 708 struct slab *slab, void *object) 709 { 710 void *base; 711 712 if (!object) 713 return 1; 714 715 base = slab_address(slab); 716 object = kasan_reset_tag(object); 717 object = restore_red_left(s, object); 718 if (object < base || object >= base + slab->objects * s->size || 719 (object - base) % s->size) { 720 return 0; 721 } 722 723 return 1; 724 } 725 726 static void print_section(char *level, char *text, u8 *addr, 727 unsigned int length) 728 { 729 metadata_access_enable(); 730 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 731 16, 1, kasan_reset_tag((void *)addr), length, 1); 732 metadata_access_disable(); 733 } 734 735 /* 736 * See comment in calculate_sizes(). 737 */ 738 static inline bool freeptr_outside_object(struct kmem_cache *s) 739 { 740 return s->offset >= s->inuse; 741 } 742 743 /* 744 * Return offset of the end of info block which is inuse + free pointer if 745 * not overlapping with object. 746 */ 747 static inline unsigned int get_info_end(struct kmem_cache *s) 748 { 749 if (freeptr_outside_object(s)) 750 return s->inuse + sizeof(void *); 751 else 752 return s->inuse; 753 } 754 755 static struct track *get_track(struct kmem_cache *s, void *object, 756 enum track_item alloc) 757 { 758 struct track *p; 759 760 p = object + get_info_end(s); 761 762 return kasan_reset_tag(p + alloc); 763 } 764 765 #ifdef CONFIG_STACKDEPOT 766 static noinline depot_stack_handle_t set_track_prepare(void) 767 { 768 depot_stack_handle_t handle; 769 unsigned long entries[TRACK_ADDRS_COUNT]; 770 unsigned int nr_entries; 771 772 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3); 773 handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); 774 775 return handle; 776 } 777 #else 778 static inline depot_stack_handle_t set_track_prepare(void) 779 { 780 return 0; 781 } 782 #endif 783 784 static void set_track_update(struct kmem_cache *s, void *object, 785 enum track_item alloc, unsigned long addr, 786 depot_stack_handle_t handle) 787 { 788 struct track *p = get_track(s, object, alloc); 789 790 #ifdef CONFIG_STACKDEPOT 791 p->handle = handle; 792 #endif 793 p->addr = addr; 794 p->cpu = smp_processor_id(); 795 p->pid = current->pid; 796 p->when = jiffies; 797 } 798 799 static __always_inline void set_track(struct kmem_cache *s, void *object, 800 enum track_item alloc, unsigned long addr) 801 { 802 depot_stack_handle_t handle = set_track_prepare(); 803 804 set_track_update(s, object, alloc, addr, handle); 805 } 806 807 static void init_tracking(struct kmem_cache *s, void *object) 808 { 809 struct track *p; 810 811 if (!(s->flags & SLAB_STORE_USER)) 812 return; 813 814 p = get_track(s, object, TRACK_ALLOC); 815 memset(p, 0, 2*sizeof(struct track)); 816 } 817 818 static void print_track(const char *s, struct track *t, unsigned long pr_time) 819 { 820 depot_stack_handle_t handle __maybe_unused; 821 822 if (!t->addr) 823 return; 824 825 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n", 826 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); 827 #ifdef CONFIG_STACKDEPOT 828 handle = READ_ONCE(t->handle); 829 if (handle) 830 stack_depot_print(handle); 831 else 832 pr_err("object allocation/free stack trace 
missing\n");
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_slab_info(const struct slab *slab)
{
	struct folio *folio = (struct folio *)slab_folio(slab);

	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
	       slab, slab->objects, slab->inuse, slab->freelist,
	       folio_flags(folio, 0));
}

/*
 * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc()
 * API family rounds the requested size up to one of them, so the allocation
 * can be larger than what was actually requested. Save the original request
 * size in the metadata area for better debugging and sanity checks.
 */
static inline void set_orig_size(struct kmem_cache *s,
				void *object, unsigned int orig_size)
{
	void *p = kasan_reset_tag(object);

	if (!slub_debug_orig_size(s))
		return;

#ifdef CONFIG_KASAN_GENERIC
	/*
	 * KASAN can save its free meta data in the object's data area at
	 * offset 0. If that meta data is larger than 'orig_size', it would
	 * overlap the data redzone at [orig_size+1, object_size], so skip
	 * the redzone check in that case by widening orig_size.
	 */
	if (kasan_metadata_size(s, true) > orig_size)
		orig_size = s->object_size;
#endif

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	*(unsigned int *)p = orig_size;
}

static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
	void *p = kasan_reset_tag(object);

	if (!slub_debug_orig_size(s))
		return s->object_size;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	return *(unsigned int *)p;
}

void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
	set_orig_size(s, (void *)object, s->object_size);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");
	va_end(args);
}

__printf(2, 3)
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = slab_address(slab);

	print_tracking(s, p);

	print_slab_info(slab);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR,         "Object   ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
			      s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (slub_debug_orig_size(s))
		off += sizeof(unsigned int);

	off += kasan_metadata_size(s, false);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding  ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

static void object_err(struct kmem_cache *s, struct slab *slab,
			u8 *object, char *reason)
{
	if (slab_add_kunit_errors())
		return;

	slab_bug(s, "%s", reason);
	print_trailer(s, slab, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, slab, nextfree) && freelist) {
		object_err(s, slab, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_slab_info(slab);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);
	unsigned int poison_size = s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		memset(p - s->red_left_pad, val, s->red_left_pad);

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			/*
			 * Red zone the space that kmalloc allocated beyond
			 * the original request, and limit the poison to the
			 * originally requested size accordingly.
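			 *
			 * Worked example (editor's illustration): for a
			 * kmalloc(13) object served from a kmalloc-16 style
			 * cache, get_orig_size() returns 13, so POISON_FREE
			 * fills bytes [0, 11], byte 12 gets POISON_END, and
			 * the red zone value covers bytes [13, inuse) instead
			 * of only [16, inuse). This assumes KASAN's free meta
			 * data did not force orig_size back up to object_size
			 * in set_orig_size().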
1032 */ 1033 poison_size = get_orig_size(s, object); 1034 } 1035 } 1036 1037 if (s->flags & __OBJECT_POISON) { 1038 memset(p, POISON_FREE, poison_size - 1); 1039 p[poison_size - 1] = POISON_END; 1040 } 1041 1042 if (s->flags & SLAB_RED_ZONE) 1043 memset(p + poison_size, val, s->inuse - poison_size); 1044 } 1045 1046 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 1047 void *from, void *to) 1048 { 1049 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); 1050 memset(from, data, to - from); 1051 } 1052 1053 static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab, 1054 u8 *object, char *what, 1055 u8 *start, unsigned int value, unsigned int bytes) 1056 { 1057 u8 *fault; 1058 u8 *end; 1059 u8 *addr = slab_address(slab); 1060 1061 metadata_access_enable(); 1062 fault = memchr_inv(kasan_reset_tag(start), value, bytes); 1063 metadata_access_disable(); 1064 if (!fault) 1065 return 1; 1066 1067 end = start + bytes; 1068 while (end > fault && end[-1] == value) 1069 end--; 1070 1071 if (slab_add_kunit_errors()) 1072 goto skip_bug_print; 1073 1074 slab_bug(s, "%s overwritten", what); 1075 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", 1076 fault, end - 1, fault - addr, 1077 fault[0], value); 1078 print_trailer(s, slab, object); 1079 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1080 1081 skip_bug_print: 1082 restore_bytes(s, what, value, fault, end); 1083 return 0; 1084 } 1085 1086 /* 1087 * Object layout: 1088 * 1089 * object address 1090 * Bytes of the object to be managed. 1091 * If the freepointer may overlay the object then the free 1092 * pointer is at the middle of the object. 1093 * 1094 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 1095 * 0xa5 (POISON_END) 1096 * 1097 * object + s->object_size 1098 * Padding to reach word boundary. This is also used for Redzoning. 1099 * Padding is extended by another word if Redzoning is enabled and 1100 * object_size == inuse. 1101 * 1102 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 1103 * 0xcc (RED_ACTIVE) for objects in use. 1104 * 1105 * object + s->inuse 1106 * Meta data starts here. 1107 * 1108 * A. Free pointer (if we cannot overwrite object on free) 1109 * B. Tracking data for SLAB_STORE_USER 1110 * C. Original request size for kmalloc object (SLAB_STORE_USER enabled) 1111 * D. Padding to reach required alignment boundary or at minimum 1112 * one word if debugging is on to be able to detect writes 1113 * before the word boundary. 1114 * 1115 * Padding is done using 0x5a (POISON_INUSE) 1116 * 1117 * object + s->size 1118 * Nothing is used beyond s->size. 1119 * 1120 * If slabcaches are merged then the object_size and inuse boundaries are mostly 1121 * ignored. And therefore no slab options that rely on these boundaries 1122 * may be used with merged slabcaches. 
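 *
 * As an illustrative example only (editor's sketch, sizes are hypothetical):
 * for a kmalloc-style cache with object_size = 24, SLAB_RED_ZONE and
 * SLAB_STORE_USER on a 64-bit machine, each s->size sized slot roughly
 * contains
 *
 *	[ left red zone ][ 24 object bytes ][ right red zone / padding ]
 *	[ free pointer ][ 2 x struct track ][ orig_size ][ final padding ]
 *
 * where the second row starts at object + s->inuse and the free pointer sits
 * there because it must not overwrite a redzoned/poisoned object.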
1123 */ 1124 1125 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) 1126 { 1127 unsigned long off = get_info_end(s); /* The end of info */ 1128 1129 if (s->flags & SLAB_STORE_USER) { 1130 /* We also have user information there */ 1131 off += 2 * sizeof(struct track); 1132 1133 if (s->flags & SLAB_KMALLOC) 1134 off += sizeof(unsigned int); 1135 } 1136 1137 off += kasan_metadata_size(s, false); 1138 1139 if (size_from_object(s) == off) 1140 return 1; 1141 1142 return check_bytes_and_report(s, slab, p, "Object padding", 1143 p + off, POISON_INUSE, size_from_object(s) - off); 1144 } 1145 1146 /* Check the pad bytes at the end of a slab page */ 1147 static void slab_pad_check(struct kmem_cache *s, struct slab *slab) 1148 { 1149 u8 *start; 1150 u8 *fault; 1151 u8 *end; 1152 u8 *pad; 1153 int length; 1154 int remainder; 1155 1156 if (!(s->flags & SLAB_POISON)) 1157 return; 1158 1159 start = slab_address(slab); 1160 length = slab_size(slab); 1161 end = start + length; 1162 remainder = length % s->size; 1163 if (!remainder) 1164 return; 1165 1166 pad = end - remainder; 1167 metadata_access_enable(); 1168 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder); 1169 metadata_access_disable(); 1170 if (!fault) 1171 return; 1172 while (end > fault && end[-1] == POISON_INUSE) 1173 end--; 1174 1175 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu", 1176 fault, end - 1, fault - start); 1177 print_section(KERN_ERR, "Padding ", pad, remainder); 1178 1179 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); 1180 } 1181 1182 static int check_object(struct kmem_cache *s, struct slab *slab, 1183 void *object, u8 val) 1184 { 1185 u8 *p = object; 1186 u8 *endobject = object + s->object_size; 1187 unsigned int orig_size; 1188 1189 if (s->flags & SLAB_RED_ZONE) { 1190 if (!check_bytes_and_report(s, slab, object, "Left Redzone", 1191 object - s->red_left_pad, val, s->red_left_pad)) 1192 return 0; 1193 1194 if (!check_bytes_and_report(s, slab, object, "Right Redzone", 1195 endobject, val, s->inuse - s->object_size)) 1196 return 0; 1197 1198 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { 1199 orig_size = get_orig_size(s, object); 1200 1201 if (s->object_size > orig_size && 1202 !check_bytes_and_report(s, slab, object, 1203 "kmalloc Redzone", p + orig_size, 1204 val, s->object_size - orig_size)) { 1205 return 0; 1206 } 1207 } 1208 } else { 1209 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 1210 check_bytes_and_report(s, slab, p, "Alignment padding", 1211 endobject, POISON_INUSE, 1212 s->inuse - s->object_size); 1213 } 1214 } 1215 1216 if (s->flags & SLAB_POISON) { 1217 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && 1218 (!check_bytes_and_report(s, slab, p, "Poison", p, 1219 POISON_FREE, s->object_size - 1) || 1220 !check_bytes_and_report(s, slab, p, "End Poison", 1221 p + s->object_size - 1, POISON_END, 1))) 1222 return 0; 1223 /* 1224 * check_pad_bytes cleans up on its own. 1225 */ 1226 check_pad_bytes(s, slab, p); 1227 } 1228 1229 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) 1230 /* 1231 * Object and freepointer overlap. Cannot check 1232 * freepointer while object is allocated. 1233 */ 1234 return 1; 1235 1236 /* Check free pointer validity */ 1237 if (!check_valid_pointer(s, slab, get_freepointer(s, p))) { 1238 object_err(s, slab, p, "Freepointer corrupt"); 1239 /* 1240 * No choice but to zap it and thus lose the remainder 1241 * of the free objects in this slab. 
May cause 1242 * another error because the object count is now wrong. 1243 */ 1244 set_freepointer(s, p, NULL); 1245 return 0; 1246 } 1247 return 1; 1248 } 1249 1250 static int check_slab(struct kmem_cache *s, struct slab *slab) 1251 { 1252 int maxobj; 1253 1254 if (!folio_test_slab(slab_folio(slab))) { 1255 slab_err(s, slab, "Not a valid slab page"); 1256 return 0; 1257 } 1258 1259 maxobj = order_objects(slab_order(slab), s->size); 1260 if (slab->objects > maxobj) { 1261 slab_err(s, slab, "objects %u > max %u", 1262 slab->objects, maxobj); 1263 return 0; 1264 } 1265 if (slab->inuse > slab->objects) { 1266 slab_err(s, slab, "inuse %u > max %u", 1267 slab->inuse, slab->objects); 1268 return 0; 1269 } 1270 /* Slab_pad_check fixes things up after itself */ 1271 slab_pad_check(s, slab); 1272 return 1; 1273 } 1274 1275 /* 1276 * Determine if a certain object in a slab is on the freelist. Must hold the 1277 * slab lock to guarantee that the chains are in a consistent state. 1278 */ 1279 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search) 1280 { 1281 int nr = 0; 1282 void *fp; 1283 void *object = NULL; 1284 int max_objects; 1285 1286 fp = slab->freelist; 1287 while (fp && nr <= slab->objects) { 1288 if (fp == search) 1289 return 1; 1290 if (!check_valid_pointer(s, slab, fp)) { 1291 if (object) { 1292 object_err(s, slab, object, 1293 "Freechain corrupt"); 1294 set_freepointer(s, object, NULL); 1295 } else { 1296 slab_err(s, slab, "Freepointer corrupt"); 1297 slab->freelist = NULL; 1298 slab->inuse = slab->objects; 1299 slab_fix(s, "Freelist cleared"); 1300 return 0; 1301 } 1302 break; 1303 } 1304 object = fp; 1305 fp = get_freepointer(s, object); 1306 nr++; 1307 } 1308 1309 max_objects = order_objects(slab_order(slab), s->size); 1310 if (max_objects > MAX_OBJS_PER_PAGE) 1311 max_objects = MAX_OBJS_PER_PAGE; 1312 1313 if (slab->objects != max_objects) { 1314 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", 1315 slab->objects, max_objects); 1316 slab->objects = max_objects; 1317 slab_fix(s, "Number of objects adjusted"); 1318 } 1319 if (slab->inuse != slab->objects - nr) { 1320 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", 1321 slab->inuse, slab->objects - nr); 1322 slab->inuse = slab->objects - nr; 1323 slab_fix(s, "Object count adjusted"); 1324 } 1325 return search == NULL; 1326 } 1327 1328 static void trace(struct kmem_cache *s, struct slab *slab, void *object, 1329 int alloc) 1330 { 1331 if (s->flags & SLAB_TRACE) { 1332 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 1333 s->name, 1334 alloc ? "alloc" : "free", 1335 object, slab->inuse, 1336 slab->freelist); 1337 1338 if (!alloc) 1339 print_section(KERN_INFO, "Object ", (void *)object, 1340 s->object_size); 1341 1342 dump_stack(); 1343 } 1344 } 1345 1346 /* 1347 * Tracking of fully allocated slabs for debugging purposes. 
1348 */ 1349 static void add_full(struct kmem_cache *s, 1350 struct kmem_cache_node *n, struct slab *slab) 1351 { 1352 if (!(s->flags & SLAB_STORE_USER)) 1353 return; 1354 1355 lockdep_assert_held(&n->list_lock); 1356 list_add(&slab->slab_list, &n->full); 1357 } 1358 1359 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab) 1360 { 1361 if (!(s->flags & SLAB_STORE_USER)) 1362 return; 1363 1364 lockdep_assert_held(&n->list_lock); 1365 list_del(&slab->slab_list); 1366 } 1367 1368 /* Tracking of the number of slabs for debugging purposes */ 1369 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1370 { 1371 struct kmem_cache_node *n = get_node(s, node); 1372 1373 return atomic_long_read(&n->nr_slabs); 1374 } 1375 1376 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1377 { 1378 return atomic_long_read(&n->nr_slabs); 1379 } 1380 1381 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 1382 { 1383 struct kmem_cache_node *n = get_node(s, node); 1384 1385 /* 1386 * May be called early in order to allocate a slab for the 1387 * kmem_cache_node structure. Solve the chicken-egg 1388 * dilemma by deferring the increment of the count during 1389 * bootstrap (see early_kmem_cache_node_alloc). 1390 */ 1391 if (likely(n)) { 1392 atomic_long_inc(&n->nr_slabs); 1393 atomic_long_add(objects, &n->total_objects); 1394 } 1395 } 1396 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 1397 { 1398 struct kmem_cache_node *n = get_node(s, node); 1399 1400 atomic_long_dec(&n->nr_slabs); 1401 atomic_long_sub(objects, &n->total_objects); 1402 } 1403 1404 /* Object debug checks for alloc/free paths */ 1405 static void setup_object_debug(struct kmem_cache *s, void *object) 1406 { 1407 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) 1408 return; 1409 1410 init_object(s, object, SLUB_RED_INACTIVE); 1411 init_tracking(s, object); 1412 } 1413 1414 static 1415 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) 1416 { 1417 if (!kmem_cache_debug_flags(s, SLAB_POISON)) 1418 return; 1419 1420 metadata_access_enable(); 1421 memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab)); 1422 metadata_access_disable(); 1423 } 1424 1425 static inline int alloc_consistency_checks(struct kmem_cache *s, 1426 struct slab *slab, void *object) 1427 { 1428 if (!check_slab(s, slab)) 1429 return 0; 1430 1431 if (!check_valid_pointer(s, slab, object)) { 1432 object_err(s, slab, object, "Freelist Pointer check fails"); 1433 return 0; 1434 } 1435 1436 if (!check_object(s, slab, object, SLUB_RED_INACTIVE)) 1437 return 0; 1438 1439 return 1; 1440 } 1441 1442 static noinline bool alloc_debug_processing(struct kmem_cache *s, 1443 struct slab *slab, void *object, int orig_size) 1444 { 1445 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1446 if (!alloc_consistency_checks(s, slab, object)) 1447 goto bad; 1448 } 1449 1450 /* Success. Perform special debug activities for allocs */ 1451 trace(s, slab, object, 1); 1452 set_orig_size(s, object, orig_size); 1453 init_object(s, object, SLUB_RED_ACTIVE); 1454 return true; 1455 1456 bad: 1457 if (folio_test_slab(slab_folio(slab))) { 1458 /* 1459 * If this is a slab page then lets do the best we can 1460 * to avoid issues in the future. Marking all objects 1461 * as used avoids touching the remaining objects. 
1462 */ 1463 slab_fix(s, "Marking all objects used"); 1464 slab->inuse = slab->objects; 1465 slab->freelist = NULL; 1466 } 1467 return false; 1468 } 1469 1470 static inline int free_consistency_checks(struct kmem_cache *s, 1471 struct slab *slab, void *object, unsigned long addr) 1472 { 1473 if (!check_valid_pointer(s, slab, object)) { 1474 slab_err(s, slab, "Invalid object pointer 0x%p", object); 1475 return 0; 1476 } 1477 1478 if (on_freelist(s, slab, object)) { 1479 object_err(s, slab, object, "Object already free"); 1480 return 0; 1481 } 1482 1483 if (!check_object(s, slab, object, SLUB_RED_ACTIVE)) 1484 return 0; 1485 1486 if (unlikely(s != slab->slab_cache)) { 1487 if (!folio_test_slab(slab_folio(slab))) { 1488 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", 1489 object); 1490 } else if (!slab->slab_cache) { 1491 pr_err("SLUB <none>: no slab for object 0x%p.\n", 1492 object); 1493 dump_stack(); 1494 } else 1495 object_err(s, slab, object, 1496 "page slab pointer corrupt."); 1497 return 0; 1498 } 1499 return 1; 1500 } 1501 1502 /* 1503 * Parse a block of slub_debug options. Blocks are delimited by ';' 1504 * 1505 * @str: start of block 1506 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified 1507 * @slabs: return start of list of slabs, or NULL when there's no list 1508 * @init: assume this is initial parsing and not per-kmem-create parsing 1509 * 1510 * returns the start of next block if there's any, or NULL 1511 */ 1512 static char * 1513 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init) 1514 { 1515 bool higher_order_disable = false; 1516 1517 /* Skip any completely empty blocks */ 1518 while (*str && *str == ';') 1519 str++; 1520 1521 if (*str == ',') { 1522 /* 1523 * No options but restriction on slabs. This means full 1524 * debugging for slabs matching a pattern. 1525 */ 1526 *flags = DEBUG_DEFAULT_FLAGS; 1527 goto check_slabs; 1528 } 1529 *flags = 0; 1530 1531 /* Determine which debug features should be switched on */ 1532 for (; *str && *str != ',' && *str != ';'; str++) { 1533 switch (tolower(*str)) { 1534 case '-': 1535 *flags = 0; 1536 break; 1537 case 'f': 1538 *flags |= SLAB_CONSISTENCY_CHECKS; 1539 break; 1540 case 'z': 1541 *flags |= SLAB_RED_ZONE; 1542 break; 1543 case 'p': 1544 *flags |= SLAB_POISON; 1545 break; 1546 case 'u': 1547 *flags |= SLAB_STORE_USER; 1548 break; 1549 case 't': 1550 *flags |= SLAB_TRACE; 1551 break; 1552 case 'a': 1553 *flags |= SLAB_FAILSLAB; 1554 break; 1555 case 'o': 1556 /* 1557 * Avoid enabling debugging on caches if its minimum 1558 * order would increase as a result. 1559 */ 1560 higher_order_disable = true; 1561 break; 1562 default: 1563 if (init) 1564 pr_err("slub_debug option '%c' unknown. 
skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}

static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	slab_flags_t global_flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	global_flags = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			global_flags = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
			if (flags & SLAB_STORE_USER)
				stack_depot_request_early_init();
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with list of
	 * slabs means debugging is only changed for those slabs, so the global
	 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			global_flags = slub_debug;
		slub_debug_string = saved_str;
	}
out:
	slub_debug = global_flags;
	if (slub_debug & SLAB_STORE_USER)
		stack_depot_request_early_init();
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	else
		static_branch_disable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

__setup("slub_debug", setup_slub_debug);

/*
 * kmem_cache_flags - apply debugging options to the cache
 * @object_size:	the size of an object without meta data
 * @flags:		flags to set
 * @name:		name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the selected slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	if (flags & SLAB_NO_USER_FLAGS)
		return flags;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
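	 *
	 * For reference (editor's illustration, the cache names here are
	 * examples only), a boot command line such as
	 *
	 *	slub_debug=FZP,kmalloc-64,kmalloc-128;U,dentry
	 *
	 * is parsed by parse_slub_debug_flags() above into two blocks: the
	 * first enables consistency checks, red zoning and poisoning for the
	 * two named kmalloc caches, the second enables user tracking for the
	 * dentry cache, and the matching block's flags are OR'ed into @flags
	 * by the loop below.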
1676 */ 1677 if (flags & SLAB_NOLEAKTRACE) 1678 slub_debug_local &= ~SLAB_STORE_USER; 1679 1680 len = strlen(name); 1681 next_block = slub_debug_string; 1682 /* Go through all blocks of debug options, see if any matches our slab's name */ 1683 while (next_block) { 1684 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); 1685 if (!iter) 1686 continue; 1687 /* Found a block that has a slab list, search it */ 1688 while (*iter) { 1689 char *end, *glob; 1690 size_t cmplen; 1691 1692 end = strchrnul(iter, ','); 1693 if (next_block && next_block < end) 1694 end = next_block - 1; 1695 1696 glob = strnchr(iter, end - iter, '*'); 1697 if (glob) 1698 cmplen = glob - iter; 1699 else 1700 cmplen = max_t(size_t, len, (end - iter)); 1701 1702 if (!strncmp(name, iter, cmplen)) { 1703 flags |= block_flags; 1704 return flags; 1705 } 1706 1707 if (!*end || *end == ';') 1708 break; 1709 iter = end + 1; 1710 } 1711 } 1712 1713 return flags | slub_debug_local; 1714 } 1715 #else /* !CONFIG_SLUB_DEBUG */ 1716 static inline void setup_object_debug(struct kmem_cache *s, void *object) {} 1717 static inline 1718 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} 1719 1720 static inline bool alloc_debug_processing(struct kmem_cache *s, 1721 struct slab *slab, void *object, int orig_size) { return true; } 1722 1723 static inline bool free_debug_processing(struct kmem_cache *s, 1724 struct slab *slab, void *head, void *tail, int *bulk_cnt, 1725 unsigned long addr, depot_stack_handle_t handle) { return true; } 1726 1727 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} 1728 static inline int check_object(struct kmem_cache *s, struct slab *slab, 1729 void *object, u8 val) { return 1; } 1730 static inline depot_stack_handle_t set_track_prepare(void) { return 0; } 1731 static inline void set_track(struct kmem_cache *s, void *object, 1732 enum track_item alloc, unsigned long addr) {} 1733 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1734 struct slab *slab) {} 1735 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 1736 struct slab *slab) {} 1737 slab_flags_t kmem_cache_flags(unsigned int object_size, 1738 slab_flags_t flags, const char *name) 1739 { 1740 return flags; 1741 } 1742 #define slub_debug 0 1743 1744 #define disable_higher_order_debug 0 1745 1746 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1747 { return 0; } 1748 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1749 { return 0; } 1750 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1751 int objects) {} 1752 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1753 int objects) {} 1754 1755 #ifndef CONFIG_SLUB_TINY 1756 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 1757 void **freelist, void *nextfree) 1758 { 1759 return false; 1760 } 1761 #endif 1762 #endif /* CONFIG_SLUB_DEBUG */ 1763 1764 /* 1765 * Hooks for other subsystems that check memory allocations. In a typical 1766 * production configuration these hooks all should produce no code at all. 
1767 */ 1768 static __always_inline bool slab_free_hook(struct kmem_cache *s, 1769 void *x, bool init) 1770 { 1771 kmemleak_free_recursive(x, s->flags); 1772 kmsan_slab_free(s, x); 1773 1774 debug_check_no_locks_freed(x, s->object_size); 1775 1776 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1777 debug_check_no_obj_freed(x, s->object_size); 1778 1779 /* Use KCSAN to help debug racy use-after-free. */ 1780 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) 1781 __kcsan_check_access(x, s->object_size, 1782 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 1783 1784 /* 1785 * As memory initialization might be integrated into KASAN, 1786 * kasan_slab_free and initialization memset's must be 1787 * kept together to avoid discrepancies in behavior. 1788 * 1789 * The initialization memset's clear the object and the metadata, 1790 * but don't touch the SLAB redzone. 1791 */ 1792 if (init) { 1793 int rsize; 1794 1795 if (!kasan_has_integrated_init()) 1796 memset(kasan_reset_tag(x), 0, s->object_size); 1797 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 1798 memset((char *)kasan_reset_tag(x) + s->inuse, 0, 1799 s->size - s->inuse - rsize); 1800 } 1801 /* KASAN might put x into memory quarantine, delaying its reuse. */ 1802 return kasan_slab_free(s, x, init); 1803 } 1804 1805 static inline bool slab_free_freelist_hook(struct kmem_cache *s, 1806 void **head, void **tail, 1807 int *cnt) 1808 { 1809 1810 void *object; 1811 void *next = *head; 1812 void *old_tail = *tail ? *tail : *head; 1813 1814 if (is_kfence_address(next)) { 1815 slab_free_hook(s, next, false); 1816 return true; 1817 } 1818 1819 /* Head and tail of the reconstructed freelist */ 1820 *head = NULL; 1821 *tail = NULL; 1822 1823 do { 1824 object = next; 1825 next = get_freepointer(s, object); 1826 1827 /* If object's reuse doesn't have to be delayed */ 1828 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) { 1829 /* Move object to the new freelist */ 1830 set_freepointer(s, object, *head); 1831 *head = object; 1832 if (!*tail) 1833 *tail = object; 1834 } else { 1835 /* 1836 * Adjust the reconstructed freelist depth 1837 * accordingly if object's reuse is delayed. 
1838 */ 1839 --(*cnt); 1840 } 1841 } while (object != old_tail); 1842 1843 if (*head == *tail) 1844 *tail = NULL; 1845 1846 return *head != NULL; 1847 } 1848 1849 static void *setup_object(struct kmem_cache *s, void *object) 1850 { 1851 setup_object_debug(s, object); 1852 object = kasan_init_slab_obj(s, object); 1853 if (unlikely(s->ctor)) { 1854 kasan_unpoison_object_data(s, object); 1855 s->ctor(object); 1856 kasan_poison_object_data(s, object); 1857 } 1858 return object; 1859 } 1860 1861 /* 1862 * Slab allocation and freeing 1863 */ 1864 static inline struct slab *alloc_slab_page(gfp_t flags, int node, 1865 struct kmem_cache_order_objects oo) 1866 { 1867 struct folio *folio; 1868 struct slab *slab; 1869 unsigned int order = oo_order(oo); 1870 1871 if (node == NUMA_NO_NODE) 1872 folio = (struct folio *)alloc_pages(flags, order); 1873 else 1874 folio = (struct folio *)__alloc_pages_node(node, flags, order); 1875 1876 if (!folio) 1877 return NULL; 1878 1879 slab = folio_slab(folio); 1880 __folio_set_slab(folio); 1881 /* Make the flag visible before any changes to folio->mapping */ 1882 smp_wmb(); 1883 if (folio_is_pfmemalloc(folio)) 1884 slab_set_pfmemalloc(slab); 1885 1886 return slab; 1887 } 1888 1889 #ifdef CONFIG_SLAB_FREELIST_RANDOM 1890 /* Pre-initialize the random sequence cache */ 1891 static int init_cache_random_seq(struct kmem_cache *s) 1892 { 1893 unsigned int count = oo_objects(s->oo); 1894 int err; 1895 1896 /* Bailout if already initialised */ 1897 if (s->random_seq) 1898 return 0; 1899 1900 err = cache_random_seq_create(s, count, GFP_KERNEL); 1901 if (err) { 1902 pr_err("SLUB: Unable to initialize free list for %s\n", 1903 s->name); 1904 return err; 1905 } 1906 1907 /* Transform to an offset on the set of pages */ 1908 if (s->random_seq) { 1909 unsigned int i; 1910 1911 for (i = 0; i < count; i++) 1912 s->random_seq[i] *= s->size; 1913 } 1914 return 0; 1915 } 1916 1917 /* Initialize each random sequence freelist per cache */ 1918 static void __init init_freelist_randomization(void) 1919 { 1920 struct kmem_cache *s; 1921 1922 mutex_lock(&slab_mutex); 1923 1924 list_for_each_entry(s, &slab_caches, list) 1925 init_cache_random_seq(s); 1926 1927 mutex_unlock(&slab_mutex); 1928 } 1929 1930 /* Get the next entry on the pre-computed freelist randomized */ 1931 static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab, 1932 unsigned long *pos, void *start, 1933 unsigned long page_limit, 1934 unsigned long freelist_count) 1935 { 1936 unsigned int idx; 1937 1938 /* 1939 * If the target page allocation failed, the number of objects on the 1940 * page might be smaller than the usual size defined by the cache. 
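	 *
	 * Editor's illustration: after init_cache_random_seq() has scaled the
	 * precomputed sequence, s->random_seq[] holds byte offsets, e.g. for
	 * s->size == 256 and 16 objects per slab a shuffled permutation of
	 * {0, 256, ..., 3840}. If this particular slab ended up with only 8
	 * objects, page_limit is 8 * 256 and the larger offsets are simply
	 * skipped by the loop below.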
1941 */ 1942 do { 1943 idx = s->random_seq[*pos]; 1944 *pos += 1; 1945 if (*pos >= freelist_count) 1946 *pos = 0; 1947 } while (unlikely(idx >= page_limit)); 1948 1949 return (char *)start + idx; 1950 } 1951 1952 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 1953 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 1954 { 1955 void *start; 1956 void *cur; 1957 void *next; 1958 unsigned long idx, pos, page_limit, freelist_count; 1959 1960 if (slab->objects < 2 || !s->random_seq) 1961 return false; 1962 1963 freelist_count = oo_objects(s->oo); 1964 pos = get_random_u32_below(freelist_count); 1965 1966 page_limit = slab->objects * s->size; 1967 start = fixup_red_left(s, slab_address(slab)); 1968 1969 /* First entry is used as the base of the freelist */ 1970 cur = next_freelist_entry(s, slab, &pos, start, page_limit, 1971 freelist_count); 1972 cur = setup_object(s, cur); 1973 slab->freelist = cur; 1974 1975 for (idx = 1; idx < slab->objects; idx++) { 1976 next = next_freelist_entry(s, slab, &pos, start, page_limit, 1977 freelist_count); 1978 next = setup_object(s, next); 1979 set_freepointer(s, cur, next); 1980 cur = next; 1981 } 1982 set_freepointer(s, cur, NULL); 1983 1984 return true; 1985 } 1986 #else 1987 static inline int init_cache_random_seq(struct kmem_cache *s) 1988 { 1989 return 0; 1990 } 1991 static inline void init_freelist_randomization(void) { } 1992 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 1993 { 1994 return false; 1995 } 1996 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 1997 1998 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1999 { 2000 struct slab *slab; 2001 struct kmem_cache_order_objects oo = s->oo; 2002 gfp_t alloc_gfp; 2003 void *start, *p, *next; 2004 int idx; 2005 bool shuffle; 2006 2007 flags &= gfp_allowed_mask; 2008 2009 flags |= s->allocflags; 2010 2011 /* 2012 * Let the initial higher-order allocation fail under memory pressure 2013 * so we fall-back to the minimum order allocation. 2014 */ 2015 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 2016 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 2017 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; 2018 2019 slab = alloc_slab_page(alloc_gfp, node, oo); 2020 if (unlikely(!slab)) { 2021 oo = s->min; 2022 alloc_gfp = flags; 2023 /* 2024 * Allocation may have failed due to fragmentation. 
2025 * Try a lower order alloc if possible 2026 */ 2027 slab = alloc_slab_page(alloc_gfp, node, oo); 2028 if (unlikely(!slab)) 2029 return NULL; 2030 stat(s, ORDER_FALLBACK); 2031 } 2032 2033 slab->objects = oo_objects(oo); 2034 slab->inuse = 0; 2035 slab->frozen = 0; 2036 2037 account_slab(slab, oo_order(oo), s, flags); 2038 2039 slab->slab_cache = s; 2040 2041 kasan_poison_slab(slab); 2042 2043 start = slab_address(slab); 2044 2045 setup_slab_debug(s, slab, start); 2046 2047 shuffle = shuffle_freelist(s, slab); 2048 2049 if (!shuffle) { 2050 start = fixup_red_left(s, start); 2051 start = setup_object(s, start); 2052 slab->freelist = start; 2053 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 2054 next = p + s->size; 2055 next = setup_object(s, next); 2056 set_freepointer(s, p, next); 2057 p = next; 2058 } 2059 set_freepointer(s, p, NULL); 2060 } 2061 2062 return slab; 2063 } 2064 2065 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 2066 { 2067 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2068 flags = kmalloc_fix_flags(flags); 2069 2070 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2071 2072 return allocate_slab(s, 2073 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 2074 } 2075 2076 static void __free_slab(struct kmem_cache *s, struct slab *slab) 2077 { 2078 struct folio *folio = slab_folio(slab); 2079 int order = folio_order(folio); 2080 int pages = 1 << order; 2081 2082 __slab_clear_pfmemalloc(slab); 2083 folio->mapping = NULL; 2084 /* Make the mapping reset visible before clearing the flag */ 2085 smp_wmb(); 2086 __folio_clear_slab(folio); 2087 mm_account_reclaimed_pages(pages); 2088 unaccount_slab(slab, order, s); 2089 __free_pages(&folio->page, order); 2090 } 2091 2092 static void rcu_free_slab(struct rcu_head *h) 2093 { 2094 struct slab *slab = container_of(h, struct slab, rcu_head); 2095 2096 __free_slab(slab->slab_cache, slab); 2097 } 2098 2099 static void free_slab(struct kmem_cache *s, struct slab *slab) 2100 { 2101 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 2102 void *p; 2103 2104 slab_pad_check(s, slab); 2105 for_each_object(p, s, slab_address(slab), slab->objects) 2106 check_object(s, slab, p, SLUB_RED_INACTIVE); 2107 } 2108 2109 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) 2110 call_rcu(&slab->rcu_head, rcu_free_slab); 2111 else 2112 __free_slab(s, slab); 2113 } 2114 2115 static void discard_slab(struct kmem_cache *s, struct slab *slab) 2116 { 2117 dec_slabs_node(s, slab_nid(slab), slab->objects); 2118 free_slab(s, slab); 2119 } 2120 2121 /* 2122 * Management of partially allocated slabs. 2123 */ 2124 static inline void 2125 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 2126 { 2127 n->nr_partial++; 2128 if (tail == DEACTIVATE_TO_TAIL) 2129 list_add_tail(&slab->slab_list, &n->partial); 2130 else 2131 list_add(&slab->slab_list, &n->partial); 2132 } 2133 2134 static inline void add_partial(struct kmem_cache_node *n, 2135 struct slab *slab, int tail) 2136 { 2137 lockdep_assert_held(&n->list_lock); 2138 __add_partial(n, slab, tail); 2139 } 2140 2141 static inline void remove_partial(struct kmem_cache_node *n, 2142 struct slab *slab) 2143 { 2144 lockdep_assert_held(&n->list_lock); 2145 list_del(&slab->slab_list); 2146 n->nr_partial--; 2147 } 2148 2149 /* 2150 * Called only for kmem_cache_debug() caches instead of acquire_slab(), with a 2151 * slab from the n->partial list. 
Remove only a single object from the slab, do 2152 * the alloc_debug_processing() checks and leave the slab on the list, or move 2153 * it to full list if it was the last free object. 2154 */ 2155 static void *alloc_single_from_partial(struct kmem_cache *s, 2156 struct kmem_cache_node *n, struct slab *slab, int orig_size) 2157 { 2158 void *object; 2159 2160 lockdep_assert_held(&n->list_lock); 2161 2162 object = slab->freelist; 2163 slab->freelist = get_freepointer(s, object); 2164 slab->inuse++; 2165 2166 if (!alloc_debug_processing(s, slab, object, orig_size)) { 2167 remove_partial(n, slab); 2168 return NULL; 2169 } 2170 2171 if (slab->inuse == slab->objects) { 2172 remove_partial(n, slab); 2173 add_full(s, n, slab); 2174 } 2175 2176 return object; 2177 } 2178 2179 /* 2180 * Called only for kmem_cache_debug() caches to allocate from a freshly 2181 * allocated slab. Allocate a single object instead of whole freelist 2182 * and put the slab to the partial (or full) list. 2183 */ 2184 static void *alloc_single_from_new_slab(struct kmem_cache *s, 2185 struct slab *slab, int orig_size) 2186 { 2187 int nid = slab_nid(slab); 2188 struct kmem_cache_node *n = get_node(s, nid); 2189 unsigned long flags; 2190 void *object; 2191 2192 2193 object = slab->freelist; 2194 slab->freelist = get_freepointer(s, object); 2195 slab->inuse = 1; 2196 2197 if (!alloc_debug_processing(s, slab, object, orig_size)) 2198 /* 2199 * It's not really expected that this would fail on a 2200 * freshly allocated slab, but a concurrent memory 2201 * corruption in theory could cause that. 2202 */ 2203 return NULL; 2204 2205 spin_lock_irqsave(&n->list_lock, flags); 2206 2207 if (slab->inuse == slab->objects) 2208 add_full(s, n, slab); 2209 else 2210 add_partial(n, slab, DEACTIVATE_TO_HEAD); 2211 2212 inc_slabs_node(s, nid, slab->objects); 2213 spin_unlock_irqrestore(&n->list_lock, flags); 2214 2215 return object; 2216 } 2217 2218 /* 2219 * Remove slab from the partial list, freeze it and 2220 * return the pointer to the freelist. 2221 * 2222 * Returns a list of objects or NULL if it fails. 2223 */ 2224 static inline void *acquire_slab(struct kmem_cache *s, 2225 struct kmem_cache_node *n, struct slab *slab, 2226 int mode) 2227 { 2228 void *freelist; 2229 unsigned long counters; 2230 struct slab new; 2231 2232 lockdep_assert_held(&n->list_lock); 2233 2234 /* 2235 * Zap the freelist and set the frozen bit. 2236 * The old freelist is the list of objects for the 2237 * per cpu allocation list. 2238 */ 2239 freelist = slab->freelist; 2240 counters = slab->counters; 2241 new.counters = counters; 2242 if (mode) { 2243 new.inuse = slab->objects; 2244 new.freelist = NULL; 2245 } else { 2246 new.freelist = freelist; 2247 } 2248 2249 VM_BUG_ON(new.frozen); 2250 new.frozen = 1; 2251 2252 if (!__slab_update_freelist(s, slab, 2253 freelist, counters, 2254 new.freelist, new.counters, 2255 "acquire_slab")) 2256 return NULL; 2257 2258 remove_partial(n, slab); 2259 WARN_ON(!freelist); 2260 return freelist; 2261 } 2262 2263 #ifdef CONFIG_SLUB_CPU_PARTIAL 2264 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 2265 #else 2266 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 2267 int drain) { } 2268 #endif 2269 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 2270 2271 /* 2272 * Try to allocate a partial slab from a specific node. 
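 *
 * When cpu partial lists are enabled, this not only hands one slab's freelist
 * back to the caller but also refills the local cpu partial list with further
 * partial slabs from this node, stopping once more than
 * s->cpu_partial_slabs / 2 of them have been stashed (see the loop below).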
2273 */ 2274 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, 2275 struct partial_context *pc) 2276 { 2277 struct slab *slab, *slab2; 2278 void *object = NULL; 2279 unsigned long flags; 2280 unsigned int partial_slabs = 0; 2281 2282 /* 2283 * Racy check. If we mistakenly see no partial slabs then we 2284 * just allocate an empty slab. If we mistakenly try to get a 2285 * partial slab and there is none available then get_partial() 2286 * will return NULL. 2287 */ 2288 if (!n || !n->nr_partial) 2289 return NULL; 2290 2291 spin_lock_irqsave(&n->list_lock, flags); 2292 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 2293 void *t; 2294 2295 if (!pfmemalloc_match(slab, pc->flags)) 2296 continue; 2297 2298 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 2299 object = alloc_single_from_partial(s, n, slab, 2300 pc->orig_size); 2301 if (object) 2302 break; 2303 continue; 2304 } 2305 2306 t = acquire_slab(s, n, slab, object == NULL); 2307 if (!t) 2308 break; 2309 2310 if (!object) { 2311 *pc->slab = slab; 2312 stat(s, ALLOC_FROM_PARTIAL); 2313 object = t; 2314 } else { 2315 put_cpu_partial(s, slab, 0); 2316 stat(s, CPU_PARTIAL_NODE); 2317 partial_slabs++; 2318 } 2319 #ifdef CONFIG_SLUB_CPU_PARTIAL 2320 if (!kmem_cache_has_cpu_partial(s) 2321 || partial_slabs > s->cpu_partial_slabs / 2) 2322 break; 2323 #else 2324 break; 2325 #endif 2326 2327 } 2328 spin_unlock_irqrestore(&n->list_lock, flags); 2329 return object; 2330 } 2331 2332 /* 2333 * Get a slab from somewhere. Search in increasing NUMA distances. 2334 */ 2335 static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc) 2336 { 2337 #ifdef CONFIG_NUMA 2338 struct zonelist *zonelist; 2339 struct zoneref *z; 2340 struct zone *zone; 2341 enum zone_type highest_zoneidx = gfp_zone(pc->flags); 2342 void *object; 2343 unsigned int cpuset_mems_cookie; 2344 2345 /* 2346 * The defrag ratio allows a configuration of the tradeoffs between 2347 * inter node defragmentation and node local allocations. A lower 2348 * defrag_ratio increases the tendency to do local allocations 2349 * instead of attempting to obtain partial slabs from other nodes. 2350 * 2351 * If the defrag_ratio is set to 0 then kmalloc() always 2352 * returns node local objects. If the ratio is higher then kmalloc() 2353 * may return off node objects because partial slabs are obtained 2354 * from other nodes and filled up. 2355 * 2356 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2357 * (which makes defrag_ratio = 1000) then every (well almost) 2358 * allocation will first attempt to defrag slab caches on other nodes. 2359 * This means scanning over all nodes to look for partial slabs which 2360 * may be expensive if we do it every time we are trying to find a slab 2361 * with available objects. 
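 *
 * For example (numbers purely illustrative): with remote_node_defrag_ratio
 * set to 200 (a sysfs value of 20), get_cycles() % 1024 exceeds the ratio
 * roughly 80% of the time, so only about one in five slowpath allocations
 * that get this far will go on to scan other nodes for partial slabs.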
2362 */ 2363 if (!s->remote_node_defrag_ratio || 2364 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2365 return NULL; 2366 2367 do { 2368 cpuset_mems_cookie = read_mems_allowed_begin(); 2369 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); 2370 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2371 struct kmem_cache_node *n; 2372 2373 n = get_node(s, zone_to_nid(zone)); 2374 2375 if (n && cpuset_zone_allowed(zone, pc->flags) && 2376 n->nr_partial > s->min_partial) { 2377 object = get_partial_node(s, n, pc); 2378 if (object) { 2379 /* 2380 * Don't check read_mems_allowed_retry() 2381 * here - if mems_allowed was updated in 2382 * parallel, that was a harmless race 2383 * between allocation and the cpuset 2384 * update 2385 */ 2386 return object; 2387 } 2388 } 2389 } 2390 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2391 #endif /* CONFIG_NUMA */ 2392 return NULL; 2393 } 2394 2395 /* 2396 * Get a partial slab, lock it and return it. 2397 */ 2398 static void *get_partial(struct kmem_cache *s, int node, struct partial_context *pc) 2399 { 2400 void *object; 2401 int searchnode = node; 2402 2403 if (node == NUMA_NO_NODE) 2404 searchnode = numa_mem_id(); 2405 2406 object = get_partial_node(s, get_node(s, searchnode), pc); 2407 if (object || node != NUMA_NO_NODE) 2408 return object; 2409 2410 return get_any_partial(s, pc); 2411 } 2412 2413 #ifndef CONFIG_SLUB_TINY 2414 2415 #ifdef CONFIG_PREEMPTION 2416 /* 2417 * Calculate the next globally unique transaction for disambiguation 2418 * during cmpxchg. The transactions start with the cpu number and are then 2419 * incremented by CONFIG_NR_CPUS. 2420 */ 2421 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2422 #else 2423 /* 2424 * No preemption supported therefore also no need to check for 2425 * different cpus. 2426 */ 2427 #define TID_STEP 1 2428 #endif /* CONFIG_PREEMPTION */ 2429 2430 static inline unsigned long next_tid(unsigned long tid) 2431 { 2432 return tid + TID_STEP; 2433 } 2434 2435 #ifdef SLUB_DEBUG_CMPXCHG 2436 static inline unsigned int tid_to_cpu(unsigned long tid) 2437 { 2438 return tid % TID_STEP; 2439 } 2440 2441 static inline unsigned long tid_to_event(unsigned long tid) 2442 { 2443 return tid / TID_STEP; 2444 } 2445 #endif 2446 2447 static inline unsigned int init_tid(int cpu) 2448 { 2449 return cpu; 2450 } 2451 2452 static inline void note_cmpxchg_failure(const char *n, 2453 const struct kmem_cache *s, unsigned long tid) 2454 { 2455 #ifdef SLUB_DEBUG_CMPXCHG 2456 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2457 2458 pr_info("%s %s: cmpxchg redo ", n, s->name); 2459 2460 #ifdef CONFIG_PREEMPTION 2461 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2462 pr_warn("due to cpu change %d -> %d\n", 2463 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2464 else 2465 #endif 2466 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2467 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2468 tid_to_event(tid), tid_to_event(actual_tid)); 2469 else 2470 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2471 actual_tid, tid, next_tid(tid)); 2472 #endif 2473 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2474 } 2475 2476 static void init_kmem_cache_cpus(struct kmem_cache *s) 2477 { 2478 int cpu; 2479 struct kmem_cache_cpu *c; 2480 2481 for_each_possible_cpu(cpu) { 2482 c = per_cpu_ptr(s->cpu_slab, cpu); 2483 local_lock_init(&c->lock); 2484 c->tid = init_tid(cpu); 2485 } 2486 } 2487 2488 /* 2489 * Finishes removing the cpu slab. 
Merges cpu's freelist with slab's freelist, 2490 * unfreezes the slab and puts it on the proper list. 2491 * Assumes the slab has already been safely taken away from kmem_cache_cpu 2492 * by the caller. 2493 */ 2494 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, 2495 void *freelist) 2496 { 2497 enum slab_modes { M_NONE, M_PARTIAL, M_FREE, M_FULL_NOLIST }; 2498 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 2499 int free_delta = 0; 2500 enum slab_modes mode = M_NONE; 2501 void *nextfree, *freelist_iter, *freelist_tail; 2502 int tail = DEACTIVATE_TO_HEAD; 2503 unsigned long flags = 0; 2504 struct slab new; 2505 struct slab old; 2506 2507 if (slab->freelist) { 2508 stat(s, DEACTIVATE_REMOTE_FREES); 2509 tail = DEACTIVATE_TO_TAIL; 2510 } 2511 2512 /* 2513 * Stage one: Count the objects on cpu's freelist as free_delta and 2514 * remember the last object in freelist_tail for later splicing. 2515 */ 2516 freelist_tail = NULL; 2517 freelist_iter = freelist; 2518 while (freelist_iter) { 2519 nextfree = get_freepointer(s, freelist_iter); 2520 2521 /* 2522 * If 'nextfree' is invalid, it is possible that the object at 2523 * 'freelist_iter' is already corrupted. So isolate all objects 2524 * starting at 'freelist_iter' by skipping them. 2525 */ 2526 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) 2527 break; 2528 2529 freelist_tail = freelist_iter; 2530 free_delta++; 2531 2532 freelist_iter = nextfree; 2533 } 2534 2535 /* 2536 * Stage two: Unfreeze the slab while splicing the per-cpu 2537 * freelist to the head of slab's freelist. 2538 * 2539 * Ensure that the slab is unfrozen while the list presence 2540 * reflects the actual number of objects during unfreeze. 2541 * 2542 * We first perform the cmpxchg while holding the lock and insert the slab into 2543 * the list when it succeeds. If there is a mismatch then the slab was not 2544 * unfrozen and the number of objects in the slab may have changed. 2545 * Then release the lock and retry the cmpxchg.
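 *
 * Depending on the resulting state, the slab then either goes back on the
 * node partial list (M_PARTIAL), is discarded because it became empty and
 * the node already holds at least min_partial slabs (M_FREE), or is left
 * off all lists because it has no free objects (M_FULL_NOLIST).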
2546 */ 2547 redo: 2548 2549 old.freelist = READ_ONCE(slab->freelist); 2550 old.counters = READ_ONCE(slab->counters); 2551 VM_BUG_ON(!old.frozen); 2552 2553 /* Determine target state of the slab */ 2554 new.counters = old.counters; 2555 if (freelist_tail) { 2556 new.inuse -= free_delta; 2557 set_freepointer(s, freelist_tail, old.freelist); 2558 new.freelist = freelist; 2559 } else 2560 new.freelist = old.freelist; 2561 2562 new.frozen = 0; 2563 2564 if (!new.inuse && n->nr_partial >= s->min_partial) { 2565 mode = M_FREE; 2566 } else if (new.freelist) { 2567 mode = M_PARTIAL; 2568 /* 2569 * Taking the spinlock removes the possibility that 2570 * acquire_slab() will see a slab that is frozen 2571 */ 2572 spin_lock_irqsave(&n->list_lock, flags); 2573 } else { 2574 mode = M_FULL_NOLIST; 2575 } 2576 2577 2578 if (!slab_update_freelist(s, slab, 2579 old.freelist, old.counters, 2580 new.freelist, new.counters, 2581 "unfreezing slab")) { 2582 if (mode == M_PARTIAL) 2583 spin_unlock_irqrestore(&n->list_lock, flags); 2584 goto redo; 2585 } 2586 2587 2588 if (mode == M_PARTIAL) { 2589 add_partial(n, slab, tail); 2590 spin_unlock_irqrestore(&n->list_lock, flags); 2591 stat(s, tail); 2592 } else if (mode == M_FREE) { 2593 stat(s, DEACTIVATE_EMPTY); 2594 discard_slab(s, slab); 2595 stat(s, FREE_SLAB); 2596 } else if (mode == M_FULL_NOLIST) { 2597 stat(s, DEACTIVATE_FULL); 2598 } 2599 } 2600 2601 #ifdef CONFIG_SLUB_CPU_PARTIAL 2602 static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab) 2603 { 2604 struct kmem_cache_node *n = NULL, *n2 = NULL; 2605 struct slab *slab, *slab_to_discard = NULL; 2606 unsigned long flags = 0; 2607 2608 while (partial_slab) { 2609 struct slab new; 2610 struct slab old; 2611 2612 slab = partial_slab; 2613 partial_slab = slab->next; 2614 2615 n2 = get_node(s, slab_nid(slab)); 2616 if (n != n2) { 2617 if (n) 2618 spin_unlock_irqrestore(&n->list_lock, flags); 2619 2620 n = n2; 2621 spin_lock_irqsave(&n->list_lock, flags); 2622 } 2623 2624 do { 2625 2626 old.freelist = slab->freelist; 2627 old.counters = slab->counters; 2628 VM_BUG_ON(!old.frozen); 2629 2630 new.counters = old.counters; 2631 new.freelist = old.freelist; 2632 2633 new.frozen = 0; 2634 2635 } while (!__slab_update_freelist(s, slab, 2636 old.freelist, old.counters, 2637 new.freelist, new.counters, 2638 "unfreezing slab")); 2639 2640 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { 2641 slab->next = slab_to_discard; 2642 slab_to_discard = slab; 2643 } else { 2644 add_partial(n, slab, DEACTIVATE_TO_TAIL); 2645 stat(s, FREE_ADD_PARTIAL); 2646 } 2647 } 2648 2649 if (n) 2650 spin_unlock_irqrestore(&n->list_lock, flags); 2651 2652 while (slab_to_discard) { 2653 slab = slab_to_discard; 2654 slab_to_discard = slab_to_discard->next; 2655 2656 stat(s, DEACTIVATE_EMPTY); 2657 discard_slab(s, slab); 2658 stat(s, FREE_SLAB); 2659 } 2660 } 2661 2662 /* 2663 * Unfreeze all the cpu partial slabs. 
2664 */ 2665 static void unfreeze_partials(struct kmem_cache *s) 2666 { 2667 struct slab *partial_slab; 2668 unsigned long flags; 2669 2670 local_lock_irqsave(&s->cpu_slab->lock, flags); 2671 partial_slab = this_cpu_read(s->cpu_slab->partial); 2672 this_cpu_write(s->cpu_slab->partial, NULL); 2673 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2674 2675 if (partial_slab) 2676 __unfreeze_partials(s, partial_slab); 2677 } 2678 2679 static void unfreeze_partials_cpu(struct kmem_cache *s, 2680 struct kmem_cache_cpu *c) 2681 { 2682 struct slab *partial_slab; 2683 2684 partial_slab = slub_percpu_partial(c); 2685 c->partial = NULL; 2686 2687 if (partial_slab) 2688 __unfreeze_partials(s, partial_slab); 2689 } 2690 2691 /* 2692 * Put a slab that was just frozen (in __slab_free|get_partial_node) into a 2693 * partial slab slot if available. 2694 * 2695 * If we did not find a slot then simply move all the partials to the 2696 * per node partial list. 2697 */ 2698 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 2699 { 2700 struct slab *oldslab; 2701 struct slab *slab_to_unfreeze = NULL; 2702 unsigned long flags; 2703 int slabs = 0; 2704 2705 local_lock_irqsave(&s->cpu_slab->lock, flags); 2706 2707 oldslab = this_cpu_read(s->cpu_slab->partial); 2708 2709 if (oldslab) { 2710 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 2711 /* 2712 * Partial array is full. Move the existing set to the 2713 * per node partial list. Postpone the actual unfreezing 2714 * outside of the critical section. 2715 */ 2716 slab_to_unfreeze = oldslab; 2717 oldslab = NULL; 2718 } else { 2719 slabs = oldslab->slabs; 2720 } 2721 } 2722 2723 slabs++; 2724 2725 slab->slabs = slabs; 2726 slab->next = oldslab; 2727 2728 this_cpu_write(s->cpu_slab->partial, slab); 2729 2730 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2731 2732 if (slab_to_unfreeze) { 2733 __unfreeze_partials(s, slab_to_unfreeze); 2734 stat(s, CPU_PARTIAL_DRAIN); 2735 } 2736 } 2737 2738 #else /* CONFIG_SLUB_CPU_PARTIAL */ 2739 2740 static inline void unfreeze_partials(struct kmem_cache *s) { } 2741 static inline void unfreeze_partials_cpu(struct kmem_cache *s, 2742 struct kmem_cache_cpu *c) { } 2743 2744 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2745 2746 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 2747 { 2748 unsigned long flags; 2749 struct slab *slab; 2750 void *freelist; 2751 2752 local_lock_irqsave(&s->cpu_slab->lock, flags); 2753 2754 slab = c->slab; 2755 freelist = c->freelist; 2756 2757 c->slab = NULL; 2758 c->freelist = NULL; 2759 c->tid = next_tid(c->tid); 2760 2761 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2762 2763 if (slab) { 2764 deactivate_slab(s, slab, freelist); 2765 stat(s, CPUSLAB_FLUSH); 2766 } 2767 } 2768 2769 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 2770 { 2771 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2772 void *freelist = c->freelist; 2773 struct slab *slab = c->slab; 2774 2775 c->slab = NULL; 2776 c->freelist = NULL; 2777 c->tid = next_tid(c->tid); 2778 2779 if (slab) { 2780 deactivate_slab(s, slab, freelist); 2781 stat(s, CPUSLAB_FLUSH); 2782 } 2783 2784 unfreeze_partials_cpu(s, c); 2785 } 2786 2787 struct slub_flush_work { 2788 struct work_struct work; 2789 struct kmem_cache *s; 2790 bool skip; 2791 }; 2792 2793 /* 2794 * Flush cpu slab. 2795 * 2796 * Called from CPU work handler with migration disabled. 
2797 */ 2798 static void flush_cpu_slab(struct work_struct *w) 2799 { 2800 struct kmem_cache *s; 2801 struct kmem_cache_cpu *c; 2802 struct slub_flush_work *sfw; 2803 2804 sfw = container_of(w, struct slub_flush_work, work); 2805 2806 s = sfw->s; 2807 c = this_cpu_ptr(s->cpu_slab); 2808 2809 if (c->slab) 2810 flush_slab(s, c); 2811 2812 unfreeze_partials(s); 2813 } 2814 2815 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 2816 { 2817 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2818 2819 return c->slab || slub_percpu_partial(c); 2820 } 2821 2822 static DEFINE_MUTEX(flush_lock); 2823 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush); 2824 2825 static void flush_all_cpus_locked(struct kmem_cache *s) 2826 { 2827 struct slub_flush_work *sfw; 2828 unsigned int cpu; 2829 2830 lockdep_assert_cpus_held(); 2831 mutex_lock(&flush_lock); 2832 2833 for_each_online_cpu(cpu) { 2834 sfw = &per_cpu(slub_flush, cpu); 2835 if (!has_cpu_slab(cpu, s)) { 2836 sfw->skip = true; 2837 continue; 2838 } 2839 INIT_WORK(&sfw->work, flush_cpu_slab); 2840 sfw->skip = false; 2841 sfw->s = s; 2842 queue_work_on(cpu, flushwq, &sfw->work); 2843 } 2844 2845 for_each_online_cpu(cpu) { 2846 sfw = &per_cpu(slub_flush, cpu); 2847 if (sfw->skip) 2848 continue; 2849 flush_work(&sfw->work); 2850 } 2851 2852 mutex_unlock(&flush_lock); 2853 } 2854 2855 static void flush_all(struct kmem_cache *s) 2856 { 2857 cpus_read_lock(); 2858 flush_all_cpus_locked(s); 2859 cpus_read_unlock(); 2860 } 2861 2862 /* 2863 * Use the cpu notifier to insure that the cpu slabs are flushed when 2864 * necessary. 2865 */ 2866 static int slub_cpu_dead(unsigned int cpu) 2867 { 2868 struct kmem_cache *s; 2869 2870 mutex_lock(&slab_mutex); 2871 list_for_each_entry(s, &slab_caches, list) 2872 __flush_cpu_slab(s, cpu); 2873 mutex_unlock(&slab_mutex); 2874 return 0; 2875 } 2876 2877 #else /* CONFIG_SLUB_TINY */ 2878 static inline void flush_all_cpus_locked(struct kmem_cache *s) { } 2879 static inline void flush_all(struct kmem_cache *s) { } 2880 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } 2881 static inline int slub_cpu_dead(unsigned int cpu) { return 0; } 2882 #endif /* CONFIG_SLUB_TINY */ 2883 2884 /* 2885 * Check if the objects in a per cpu structure fit numa 2886 * locality expectations. 
2887 */ 2888 static inline int node_match(struct slab *slab, int node) 2889 { 2890 #ifdef CONFIG_NUMA 2891 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 2892 return 0; 2893 #endif 2894 return 1; 2895 } 2896 2897 #ifdef CONFIG_SLUB_DEBUG 2898 static int count_free(struct slab *slab) 2899 { 2900 return slab->objects - slab->inuse; 2901 } 2902 2903 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 2904 { 2905 return atomic_long_read(&n->total_objects); 2906 } 2907 2908 /* Supports checking bulk free of a constructed freelist */ 2909 static inline bool free_debug_processing(struct kmem_cache *s, 2910 struct slab *slab, void *head, void *tail, int *bulk_cnt, 2911 unsigned long addr, depot_stack_handle_t handle) 2912 { 2913 bool checks_ok = false; 2914 void *object = head; 2915 int cnt = 0; 2916 2917 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 2918 if (!check_slab(s, slab)) 2919 goto out; 2920 } 2921 2922 if (slab->inuse < *bulk_cnt) { 2923 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", 2924 slab->inuse, *bulk_cnt); 2925 goto out; 2926 } 2927 2928 next_object: 2929 2930 if (++cnt > *bulk_cnt) 2931 goto out_cnt; 2932 2933 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 2934 if (!free_consistency_checks(s, slab, object, addr)) 2935 goto out; 2936 } 2937 2938 if (s->flags & SLAB_STORE_USER) 2939 set_track_update(s, object, TRACK_FREE, addr, handle); 2940 trace(s, slab, object, 0); 2941 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 2942 init_object(s, object, SLUB_RED_INACTIVE); 2943 2944 /* Reached end of constructed freelist yet? */ 2945 if (object != tail) { 2946 object = get_freepointer(s, object); 2947 goto next_object; 2948 } 2949 checks_ok = true; 2950 2951 out_cnt: 2952 if (cnt != *bulk_cnt) { 2953 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", 2954 *bulk_cnt, cnt); 2955 *bulk_cnt = cnt; 2956 } 2957 2958 out: 2959 2960 if (!checks_ok) 2961 slab_fix(s, "Object at 0x%p not freed", object); 2962 2963 return checks_ok; 2964 } 2965 #endif /* CONFIG_SLUB_DEBUG */ 2966 2967 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) 2968 static unsigned long count_partial(struct kmem_cache_node *n, 2969 int (*get_count)(struct slab *)) 2970 { 2971 unsigned long flags; 2972 unsigned long x = 0; 2973 struct slab *slab; 2974 2975 spin_lock_irqsave(&n->list_lock, flags); 2976 list_for_each_entry(slab, &n->partial, slab_list) 2977 x += get_count(slab); 2978 spin_unlock_irqrestore(&n->list_lock, flags); 2979 return x; 2980 } 2981 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ 2982 2983 #ifdef CONFIG_SLUB_DEBUG 2984 static noinline void 2985 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 2986 { 2987 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 2988 DEFAULT_RATELIMIT_BURST); 2989 int node; 2990 struct kmem_cache_node *n; 2991 2992 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 2993 return; 2994 2995 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 2996 nid, gfpflags, &gfpflags); 2997 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 2998 s->name, s->object_size, s->size, oo_order(s->oo), 2999 oo_order(s->min)); 3000 3001 if (oo_order(s->min) > get_order(s->object_size)) 3002 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n", 3003 s->name); 3004 3005 for_each_kmem_cache_node(s, node, n) { 3006 unsigned long nr_slabs; 3007 unsigned long nr_objs; 3008 unsigned 
long nr_free; 3009 3010 nr_free = count_partial(n, count_free); 3011 nr_slabs = node_nr_slabs(n); 3012 nr_objs = node_nr_objs(n); 3013 3014 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 3015 node, nr_slabs, nr_objs, nr_free); 3016 } 3017 } 3018 #else /* CONFIG_SLUB_DEBUG */ 3019 static inline void 3020 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } 3021 #endif 3022 3023 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) 3024 { 3025 if (unlikely(slab_test_pfmemalloc(slab))) 3026 return gfp_pfmemalloc_allowed(gfpflags); 3027 3028 return true; 3029 } 3030 3031 #ifndef CONFIG_SLUB_TINY 3032 static inline bool 3033 __update_cpu_freelist_fast(struct kmem_cache *s, 3034 void *freelist_old, void *freelist_new, 3035 unsigned long tid) 3036 { 3037 freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; 3038 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; 3039 3040 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, 3041 &old.full, new.full); 3042 } 3043 3044 /* 3045 * Check the slab->freelist and either transfer the freelist to the 3046 * per cpu freelist or deactivate the slab. 3047 * 3048 * The slab is still frozen if the return value is not NULL. 3049 * 3050 * If this function returns NULL then the slab has been unfrozen. 3051 */ 3052 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 3053 { 3054 struct slab new; 3055 unsigned long counters; 3056 void *freelist; 3057 3058 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3059 3060 do { 3061 freelist = slab->freelist; 3062 counters = slab->counters; 3063 3064 new.counters = counters; 3065 VM_BUG_ON(!new.frozen); 3066 3067 new.inuse = slab->objects; 3068 new.frozen = freelist != NULL; 3069 3070 } while (!__slab_update_freelist(s, slab, 3071 freelist, counters, 3072 NULL, new.counters, 3073 "get_freelist")); 3074 3075 return freelist; 3076 } 3077 3078 /* 3079 * Slow path. The lockless freelist is empty or we need to perform 3080 * debugging duties. 3081 * 3082 * Processing is still very fast if new objects have been freed to the 3083 * regular freelist. In that case we simply take over the regular freelist 3084 * as the lockless freelist and zap the regular freelist. 3085 * 3086 * If that is not working then we fall back to the partial lists. We take the 3087 * first element of the freelist as the object to allocate now and move the 3088 * rest of the freelist to the lockless freelist. 3089 * 3090 * And if we were unable to get a new slab from the partial slab lists then 3091 * we need to allocate a new slab. This is the slowest path since it involves 3092 * a call to the page allocator and the setup of a new slab. 3093 * 3094 * Version of __slab_alloc to use when we know that preemption is 3095 * already disabled (which is the case for bulk allocation). 
3096 */ 3097 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3098 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3099 { 3100 void *freelist; 3101 struct slab *slab; 3102 unsigned long flags; 3103 struct partial_context pc; 3104 3105 stat(s, ALLOC_SLOWPATH); 3106 3107 reread_slab: 3108 3109 slab = READ_ONCE(c->slab); 3110 if (!slab) { 3111 /* 3112 * if the node is not online or has no normal memory, just 3113 * ignore the node constraint 3114 */ 3115 if (unlikely(node != NUMA_NO_NODE && 3116 !node_isset(node, slab_nodes))) 3117 node = NUMA_NO_NODE; 3118 goto new_slab; 3119 } 3120 redo: 3121 3122 if (unlikely(!node_match(slab, node))) { 3123 /* 3124 * same as above but node_match() being false already 3125 * implies node != NUMA_NO_NODE 3126 */ 3127 if (!node_isset(node, slab_nodes)) { 3128 node = NUMA_NO_NODE; 3129 } else { 3130 stat(s, ALLOC_NODE_MISMATCH); 3131 goto deactivate_slab; 3132 } 3133 } 3134 3135 /* 3136 * By rights, we should be searching for a slab page that was 3137 * PFMEMALLOC but right now, we are losing the pfmemalloc 3138 * information when the page leaves the per-cpu allocator 3139 */ 3140 if (unlikely(!pfmemalloc_match(slab, gfpflags))) 3141 goto deactivate_slab; 3142 3143 /* must check again c->slab in case we got preempted and it changed */ 3144 local_lock_irqsave(&s->cpu_slab->lock, flags); 3145 if (unlikely(slab != c->slab)) { 3146 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3147 goto reread_slab; 3148 } 3149 freelist = c->freelist; 3150 if (freelist) 3151 goto load_freelist; 3152 3153 freelist = get_freelist(s, slab); 3154 3155 if (!freelist) { 3156 c->slab = NULL; 3157 c->tid = next_tid(c->tid); 3158 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3159 stat(s, DEACTIVATE_BYPASS); 3160 goto new_slab; 3161 } 3162 3163 stat(s, ALLOC_REFILL); 3164 3165 load_freelist: 3166 3167 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3168 3169 /* 3170 * freelist is pointing to the list of objects to be used. 3171 * slab is pointing to the slab from which the objects are obtained. 3172 * That slab must be frozen for per cpu allocations to work. 
3173 */ 3174 VM_BUG_ON(!c->slab->frozen); 3175 c->freelist = get_freepointer(s, freelist); 3176 c->tid = next_tid(c->tid); 3177 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3178 return freelist; 3179 3180 deactivate_slab: 3181 3182 local_lock_irqsave(&s->cpu_slab->lock, flags); 3183 if (slab != c->slab) { 3184 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3185 goto reread_slab; 3186 } 3187 freelist = c->freelist; 3188 c->slab = NULL; 3189 c->freelist = NULL; 3190 c->tid = next_tid(c->tid); 3191 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3192 deactivate_slab(s, slab, freelist); 3193 3194 new_slab: 3195 3196 if (slub_percpu_partial(c)) { 3197 local_lock_irqsave(&s->cpu_slab->lock, flags); 3198 if (unlikely(c->slab)) { 3199 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3200 goto reread_slab; 3201 } 3202 if (unlikely(!slub_percpu_partial(c))) { 3203 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3204 /* we were preempted and partial list got empty */ 3205 goto new_objects; 3206 } 3207 3208 slab = c->slab = slub_percpu_partial(c); 3209 slub_set_percpu_partial(c, slab); 3210 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3211 stat(s, CPU_PARTIAL_ALLOC); 3212 goto redo; 3213 } 3214 3215 new_objects: 3216 3217 pc.flags = gfpflags; 3218 pc.slab = &slab; 3219 pc.orig_size = orig_size; 3220 freelist = get_partial(s, node, &pc); 3221 if (freelist) 3222 goto check_new_slab; 3223 3224 slub_put_cpu_ptr(s->cpu_slab); 3225 slab = new_slab(s, gfpflags, node); 3226 c = slub_get_cpu_ptr(s->cpu_slab); 3227 3228 if (unlikely(!slab)) { 3229 slab_out_of_memory(s, gfpflags, node); 3230 return NULL; 3231 } 3232 3233 stat(s, ALLOC_SLAB); 3234 3235 if (kmem_cache_debug(s)) { 3236 freelist = alloc_single_from_new_slab(s, slab, orig_size); 3237 3238 if (unlikely(!freelist)) 3239 goto new_objects; 3240 3241 if (s->flags & SLAB_STORE_USER) 3242 set_track(s, freelist, TRACK_ALLOC, addr); 3243 3244 return freelist; 3245 } 3246 3247 /* 3248 * No other reference to the slab yet so we can 3249 * muck around with it freely without cmpxchg 3250 */ 3251 freelist = slab->freelist; 3252 slab->freelist = NULL; 3253 slab->inuse = slab->objects; 3254 slab->frozen = 1; 3255 3256 inc_slabs_node(s, slab_nid(slab), slab->objects); 3257 3258 check_new_slab: 3259 3260 if (kmem_cache_debug(s)) { 3261 /* 3262 * For debug caches here we had to go through 3263 * alloc_single_from_partial() so just store the tracking info 3264 * and return the object 3265 */ 3266 if (s->flags & SLAB_STORE_USER) 3267 set_track(s, freelist, TRACK_ALLOC, addr); 3268 3269 return freelist; 3270 } 3271 3272 if (unlikely(!pfmemalloc_match(slab, gfpflags))) { 3273 /* 3274 * For !pfmemalloc_match() case we don't load freelist so that 3275 * we don't make further mismatched allocations easier. 3276 */ 3277 deactivate_slab(s, slab, get_freepointer(s, freelist)); 3278 return freelist; 3279 } 3280 3281 retry_load_slab: 3282 3283 local_lock_irqsave(&s->cpu_slab->lock, flags); 3284 if (unlikely(c->slab)) { 3285 void *flush_freelist = c->freelist; 3286 struct slab *flush_slab = c->slab; 3287 3288 c->slab = NULL; 3289 c->freelist = NULL; 3290 c->tid = next_tid(c->tid); 3291 3292 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3293 3294 deactivate_slab(s, flush_slab, flush_freelist); 3295 3296 stat(s, CPUSLAB_FLUSH); 3297 3298 goto retry_load_slab; 3299 } 3300 c->slab = slab; 3301 3302 goto load_freelist; 3303 } 3304 3305 /* 3306 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 3307 * disabled. 
Compensates for possible cpu changes by refetching the per cpu area 3308 * pointer. 3309 */ 3310 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3311 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3312 { 3313 void *p; 3314 3315 #ifdef CONFIG_PREEMPT_COUNT 3316 /* 3317 * We may have been preempted and rescheduled on a different 3318 * cpu before disabling preemption. Need to reload cpu area 3319 * pointer. 3320 */ 3321 c = slub_get_cpu_ptr(s->cpu_slab); 3322 #endif 3323 3324 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); 3325 #ifdef CONFIG_PREEMPT_COUNT 3326 slub_put_cpu_ptr(s->cpu_slab); 3327 #endif 3328 return p; 3329 } 3330 3331 static __always_inline void *__slab_alloc_node(struct kmem_cache *s, 3332 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3333 { 3334 struct kmem_cache_cpu *c; 3335 struct slab *slab; 3336 unsigned long tid; 3337 void *object; 3338 3339 redo: 3340 /* 3341 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 3342 * enabled. We may switch back and forth between cpus while 3343 * reading from one cpu area. That does not matter as long 3344 * as we end up on the original cpu again when doing the cmpxchg. 3345 * 3346 * We must guarantee that tid and kmem_cache_cpu are retrieved on the 3347 * same cpu. We first read the kmem_cache_cpu pointer and use it to read 3348 * the tid. If we are preempted and switched to another cpu between the 3349 * two reads, it's OK as the two are still associated with the same cpu 3350 * and cmpxchg later will validate the cpu. 3351 */ 3352 c = raw_cpu_ptr(s->cpu_slab); 3353 tid = READ_ONCE(c->tid); 3354 3355 /* 3356 * The irqless object alloc/free algorithm used here depends on the sequence 3357 * of fetching cpu_slab's data. tid should be fetched before anything else 3358 * on c to guarantee that the object and slab associated with the previous tid 3359 * won't be used with the current tid. If we fetch the tid first, the object and 3360 * slab could be the ones associated with the next tid and our alloc/free 3361 * request will fail. In this case we simply retry, so this is not a problem. 3362 */ 3363 barrier(); 3364 3365 /* 3366 * The transaction ids are globally unique per cpu and per operation on 3367 * a per cpu queue. Thus they guarantee that the cmpxchg_double 3368 * occurs on the right processor and that there was no operation on the 3369 * linked list in between. 3370 */ 3371 3372 object = c->freelist; 3373 slab = c->slab; 3374 3375 if (!USE_LOCKLESS_FAST_PATH() || 3376 unlikely(!object || !slab || !node_match(slab, node))) { 3377 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); 3378 } else { 3379 void *next_object = get_freepointer_safe(s, object); 3380 3381 /* 3382 * The cmpxchg will only match if there was no additional 3383 * operation and if we are on the right processor. 3384 * 3385 * The cmpxchg does the following atomically (without lock 3386 * semantics!) 3387 * 1. Relocate first pointer to the current per cpu area. 3388 * 2. Verify that tid and freelist have not been changed 3389 * 3. If they were not changed replace tid and freelist 3390 * 3391 * Since this is without lock semantics the protection is only 3392 * against code executing on this cpu, *not* against access by 3393 * other cpus.
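 *
 * As a concrete illustration (CONFIG_NR_CPUS == 8 picked only for the
 * example): TID_STEP is then 8 and cpu 2 uses tids 2, 10, 18, ... If the
 * task read tid 10 on cpu 2 and was then migrated to cpu 5, the this_cpu
 * cmpxchg compares against cpu 5's tid, which is always congruent to
 * 5 modulo 8 and can never equal 10, so the fastpath fails and we retry
 * on the new cpu.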
3394 */ 3395 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { 3396 note_cmpxchg_failure("slab_alloc", s, tid); 3397 goto redo; 3398 } 3399 prefetch_freepointer(s, next_object); 3400 stat(s, ALLOC_FASTPATH); 3401 } 3402 3403 return object; 3404 } 3405 #else /* CONFIG_SLUB_TINY */ 3406 static void *__slab_alloc_node(struct kmem_cache *s, 3407 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3408 { 3409 struct partial_context pc; 3410 struct slab *slab; 3411 void *object; 3412 3413 pc.flags = gfpflags; 3414 pc.slab = &slab; 3415 pc.orig_size = orig_size; 3416 object = get_partial(s, node, &pc); 3417 3418 if (object) 3419 return object; 3420 3421 slab = new_slab(s, gfpflags, node); 3422 if (unlikely(!slab)) { 3423 slab_out_of_memory(s, gfpflags, node); 3424 return NULL; 3425 } 3426 3427 object = alloc_single_from_new_slab(s, slab, orig_size); 3428 3429 return object; 3430 } 3431 #endif /* CONFIG_SLUB_TINY */ 3432 3433 /* 3434 * If the object has been wiped upon free, make sure it's fully initialized by 3435 * zeroing out freelist pointer. 3436 */ 3437 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 3438 void *obj) 3439 { 3440 if (unlikely(slab_want_init_on_free(s)) && obj) 3441 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 3442 0, sizeof(void *)); 3443 } 3444 3445 /* 3446 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 3447 * have the fastpath folded into their functions. So no function call 3448 * overhead for requests that can be satisfied on the fastpath. 3449 * 3450 * The fastpath works by first checking if the lockless freelist can be used. 3451 * If not then __slab_alloc is called for slow processing. 3452 * 3453 * Otherwise we can simply pick the next object from the lockless free list. 
3454 */ 3455 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, 3456 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3457 { 3458 void *object; 3459 struct obj_cgroup *objcg = NULL; 3460 bool init = false; 3461 3462 s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags); 3463 if (!s) 3464 return NULL; 3465 3466 object = kfence_alloc(s, orig_size, gfpflags); 3467 if (unlikely(object)) 3468 goto out; 3469 3470 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); 3471 3472 maybe_wipe_obj_freeptr(s, object); 3473 init = slab_want_init_on_alloc(gfpflags, s); 3474 3475 out: 3476 /* 3477 * When init equals 'true', like for kzalloc() family, only 3478 * @orig_size bytes might be zeroed instead of s->object_size 3479 */ 3480 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size); 3481 3482 return object; 3483 } 3484 3485 static __fastpath_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru, 3486 gfp_t gfpflags, unsigned long addr, size_t orig_size) 3487 { 3488 return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size); 3489 } 3490 3491 static __fastpath_inline 3492 void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, 3493 gfp_t gfpflags) 3494 { 3495 void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size); 3496 3497 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 3498 3499 return ret; 3500 } 3501 3502 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 3503 { 3504 return __kmem_cache_alloc_lru(s, NULL, gfpflags); 3505 } 3506 EXPORT_SYMBOL(kmem_cache_alloc); 3507 3508 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, 3509 gfp_t gfpflags) 3510 { 3511 return __kmem_cache_alloc_lru(s, lru, gfpflags); 3512 } 3513 EXPORT_SYMBOL(kmem_cache_alloc_lru); 3514 3515 void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, 3516 int node, size_t orig_size, 3517 unsigned long caller) 3518 { 3519 return slab_alloc_node(s, NULL, gfpflags, node, 3520 caller, orig_size); 3521 } 3522 3523 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 3524 { 3525 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); 3526 3527 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); 3528 3529 return ret; 3530 } 3531 EXPORT_SYMBOL(kmem_cache_alloc_node); 3532 3533 static noinline void free_to_partial_list( 3534 struct kmem_cache *s, struct slab *slab, 3535 void *head, void *tail, int bulk_cnt, 3536 unsigned long addr) 3537 { 3538 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 3539 struct slab *slab_free = NULL; 3540 int cnt = bulk_cnt; 3541 unsigned long flags; 3542 depot_stack_handle_t handle = 0; 3543 3544 if (s->flags & SLAB_STORE_USER) 3545 handle = set_track_prepare(); 3546 3547 spin_lock_irqsave(&n->list_lock, flags); 3548 3549 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { 3550 void *prior = slab->freelist; 3551 3552 /* Perform the actual freeing while we still hold the locks */ 3553 slab->inuse -= cnt; 3554 set_freepointer(s, tail, prior); 3555 slab->freelist = head; 3556 3557 /* 3558 * If the slab is empty, and node's partial list is full, 3559 * it should be discarded anyway no matter it's on full or 3560 * partial list. 
3561 */ 3562 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) 3563 slab_free = slab; 3564 3565 if (!prior) { 3566 /* was on full list */ 3567 remove_full(s, n, slab); 3568 if (!slab_free) { 3569 add_partial(n, slab, DEACTIVATE_TO_TAIL); 3570 stat(s, FREE_ADD_PARTIAL); 3571 } 3572 } else if (slab_free) { 3573 remove_partial(n, slab); 3574 stat(s, FREE_REMOVE_PARTIAL); 3575 } 3576 } 3577 3578 if (slab_free) { 3579 /* 3580 * Update the counters while still holding n->list_lock to 3581 * prevent spurious validation warnings 3582 */ 3583 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); 3584 } 3585 3586 spin_unlock_irqrestore(&n->list_lock, flags); 3587 3588 if (slab_free) { 3589 stat(s, FREE_SLAB); 3590 free_slab(s, slab_free); 3591 } 3592 } 3593 3594 /* 3595 * Slow path handling. This may still be called frequently since objects 3596 * have a longer lifetime than the cpu slabs in most processing loads. 3597 * 3598 * So we still attempt to reduce cache line usage. Just take the slab 3599 * lock and free the item. If there is no additional partial slab 3600 * handling required then we can return immediately. 3601 */ 3602 static void __slab_free(struct kmem_cache *s, struct slab *slab, 3603 void *head, void *tail, int cnt, 3604 unsigned long addr) 3605 3606 { 3607 void *prior; 3608 int was_frozen; 3609 struct slab new; 3610 unsigned long counters; 3611 struct kmem_cache_node *n = NULL; 3612 unsigned long flags; 3613 3614 stat(s, FREE_SLOWPATH); 3615 3616 if (kfence_free(head)) 3617 return; 3618 3619 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 3620 free_to_partial_list(s, slab, head, tail, cnt, addr); 3621 return; 3622 } 3623 3624 do { 3625 if (unlikely(n)) { 3626 spin_unlock_irqrestore(&n->list_lock, flags); 3627 n = NULL; 3628 } 3629 prior = slab->freelist; 3630 counters = slab->counters; 3631 set_freepointer(s, tail, prior); 3632 new.counters = counters; 3633 was_frozen = new.frozen; 3634 new.inuse -= cnt; 3635 if ((!new.inuse || !prior) && !was_frozen) { 3636 3637 if (kmem_cache_has_cpu_partial(s) && !prior) { 3638 3639 /* 3640 * Slab was on no list before and will be 3641 * partially empty 3642 * We can defer the list move and instead 3643 * freeze it. 3644 */ 3645 new.frozen = 1; 3646 3647 } else { /* Needs to be taken off a list */ 3648 3649 n = get_node(s, slab_nid(slab)); 3650 /* 3651 * Speculatively acquire the list_lock. 3652 * If the cmpxchg does not succeed then we may 3653 * drop the list_lock without any processing. 3654 * 3655 * Otherwise the list_lock will synchronize with 3656 * other processors updating the list of slabs. 3657 */ 3658 spin_lock_irqsave(&n->list_lock, flags); 3659 3660 } 3661 } 3662 3663 } while (!slab_update_freelist(s, slab, 3664 prior, counters, 3665 head, new.counters, 3666 "__slab_free")); 3667 3668 if (likely(!n)) { 3669 3670 if (likely(was_frozen)) { 3671 /* 3672 * The list lock was not taken therefore no list 3673 * activity can be necessary. 3674 */ 3675 stat(s, FREE_FROZEN); 3676 } else if (new.frozen) { 3677 /* 3678 * If we just froze the slab then put it onto the 3679 * per cpu partial list. 3680 */ 3681 put_cpu_partial(s, slab, 1); 3682 stat(s, CPU_PARTIAL_FREE); 3683 } 3684 3685 return; 3686 } 3687 3688 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 3689 goto slab_empty; 3690 3691 /* 3692 * Objects left in the slab. If it was not on the partial list before 3693 * then add it. 
3694 */ 3695 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 3696 remove_full(s, n, slab); 3697 add_partial(n, slab, DEACTIVATE_TO_TAIL); 3698 stat(s, FREE_ADD_PARTIAL); 3699 } 3700 spin_unlock_irqrestore(&n->list_lock, flags); 3701 return; 3702 3703 slab_empty: 3704 if (prior) { 3705 /* 3706 * Slab on the partial list. 3707 */ 3708 remove_partial(n, slab); 3709 stat(s, FREE_REMOVE_PARTIAL); 3710 } else { 3711 /* Slab must be on the full list */ 3712 remove_full(s, n, slab); 3713 } 3714 3715 spin_unlock_irqrestore(&n->list_lock, flags); 3716 stat(s, FREE_SLAB); 3717 discard_slab(s, slab); 3718 } 3719 3720 #ifndef CONFIG_SLUB_TINY 3721 /* 3722 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 3723 * can perform fastpath freeing without additional function calls. 3724 * 3725 * The fastpath is only possible if we are freeing to the current cpu slab 3726 * of this processor. This is typically the case if we have just allocated 3727 * the item before. 3728 * 3729 * If the fastpath is not possible then fall back to __slab_free where we deal 3730 * with all sorts of special processing. 3731 * 3732 * Bulk free of a freelist with several objects (all pointing to the 3733 * same slab) is possible by specifying the head and tail pointers, plus the object 3734 * count (cnt). Bulk free is indicated by the tail pointer being set. 3735 */ 3736 static __always_inline void do_slab_free(struct kmem_cache *s, 3737 struct slab *slab, void *head, void *tail, 3738 int cnt, unsigned long addr) 3739 { 3740 void *tail_obj = tail ? : head; 3741 struct kmem_cache_cpu *c; 3742 unsigned long tid; 3743 void **freelist; 3744 3745 redo: 3746 /* 3747 * Determine the current cpu's per cpu slab. 3748 * The cpu may change afterward. However that does not matter since 3749 * data is retrieved via this pointer. If we are on the same cpu 3750 * during the cmpxchg then the free will succeed. 3751 */ 3752 c = raw_cpu_ptr(s->cpu_slab); 3753 tid = READ_ONCE(c->tid); 3754 3755 /* Same as the comment on barrier() in slab_alloc_node() */ 3756 barrier(); 3757 3758 if (unlikely(slab != c->slab)) { 3759 __slab_free(s, slab, head, tail_obj, cnt, addr); 3760 return; 3761 } 3762 3763 if (USE_LOCKLESS_FAST_PATH()) { 3764 freelist = READ_ONCE(c->freelist); 3765 3766 set_freepointer(s, tail_obj, freelist); 3767 3768 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { 3769 note_cmpxchg_failure("slab_free", s, tid); 3770 goto redo; 3771 } 3772 } else { 3773 /* Update the free list under the local lock */ 3774 local_lock(&s->cpu_slab->lock); 3775 c = this_cpu_ptr(s->cpu_slab); 3776 if (unlikely(slab != c->slab)) { 3777 local_unlock(&s->cpu_slab->lock); 3778 goto redo; 3779 } 3780 tid = c->tid; 3781 freelist = c->freelist; 3782 3783 set_freepointer(s, tail_obj, freelist); 3784 c->freelist = head; 3785 c->tid = next_tid(tid); 3786 3787 local_unlock(&s->cpu_slab->lock); 3788 } 3789 stat(s, FREE_FASTPATH); 3790 } 3791 #else /* CONFIG_SLUB_TINY */ 3792 static void do_slab_free(struct kmem_cache *s, 3793 struct slab *slab, void *head, void *tail, 3794 int cnt, unsigned long addr) 3795 { 3796 void *tail_obj = tail ?
: head; 3797 3798 __slab_free(s, slab, head, tail_obj, cnt, addr); 3799 } 3800 #endif /* CONFIG_SLUB_TINY */ 3801 3802 static __fastpath_inline void slab_free(struct kmem_cache *s, struct slab *slab, 3803 void *head, void *tail, void **p, int cnt, 3804 unsigned long addr) 3805 { 3806 memcg_slab_free_hook(s, slab, p, cnt); 3807 /* 3808 * With KASAN enabled slab_free_freelist_hook modifies the freelist 3809 * to remove objects whose reuse must be delayed. 3810 */ 3811 if (slab_free_freelist_hook(s, &head, &tail, &cnt)) 3812 do_slab_free(s, slab, head, tail, cnt, addr); 3813 } 3814 3815 #ifdef CONFIG_KASAN_GENERIC 3816 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 3817 { 3818 do_slab_free(cache, virt_to_slab(x), x, NULL, 1, addr); 3819 } 3820 #endif 3821 3822 void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller) 3823 { 3824 slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller); 3825 } 3826 3827 void kmem_cache_free(struct kmem_cache *s, void *x) 3828 { 3829 s = cache_from_obj(s, x); 3830 if (!s) 3831 return; 3832 trace_kmem_cache_free(_RET_IP_, x, s); 3833 slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_); 3834 } 3835 EXPORT_SYMBOL(kmem_cache_free); 3836 3837 struct detached_freelist { 3838 struct slab *slab; 3839 void *tail; 3840 void *freelist; 3841 int cnt; 3842 struct kmem_cache *s; 3843 }; 3844 3845 /* 3846 * This function progressively scans the array of free objects (with 3847 * a limited look ahead) and extracts objects belonging to the same 3848 * slab. It builds a detached freelist directly within the given 3849 * slab/objects. This can happen without any need for 3850 * synchronization, because the objects are owned by the running process. 3851 * The freelist is built up as a singly linked list in the objects. 3852 * The idea is that this detached freelist can then be bulk 3853 * transferred to the real freelist(s), while only requiring a single 3854 * synchronization primitive. Look ahead in the array is limited 3855 * for performance reasons. 3856 */ 3857 static inline 3858 int build_detached_freelist(struct kmem_cache *s, size_t size, 3859 void **p, struct detached_freelist *df) 3860 { 3861 int lookahead = 3; 3862 void *object; 3863 struct folio *folio; 3864 size_t same; 3865 3866 object = p[--size]; 3867 folio = virt_to_folio(object); 3868 if (!s) { 3869 /* Handle kmalloc'ed objects */ 3870 if (unlikely(!folio_test_slab(folio))) { 3871 free_large_kmalloc(folio, object); 3872 df->slab = NULL; 3873 return size; 3874 } 3875 /* Derive kmem_cache from object */ 3876 df->slab = folio_slab(folio); 3877 df->s = df->slab->slab_cache; 3878 } else { 3879 df->slab = folio_slab(folio); 3880 df->s = cache_from_obj(s, object); /* Support for memcg */ 3881 } 3882 3883 /* Start new detached freelist */ 3884 df->tail = object; 3885 df->freelist = object; 3886 df->cnt = 1; 3887 3888 if (is_kfence_address(object)) 3889 return size; 3890 3891 set_freepointer(df->s, object, NULL); 3892 3893 same = size; 3894 while (size) { 3895 object = p[--size]; 3896 /* df->slab is always set at this point */ 3897 if (df->slab == virt_to_slab(object)) { 3898 /* Opportunistically build the freelist */ 3899 set_freepointer(df->s, object, df->freelist); 3900 df->freelist = object; 3901 df->cnt++; 3902 same--; 3903 if (size != same) 3904 swap(p[size], p[same]); 3905 continue; 3906 } 3907 3908 /* Limit look ahead search */ 3909 if (!--lookahead) 3910 break; 3911 } 3912 3913 return same; 3914 } 3915 3916 /* Note that interrupts must be enabled when calling this function.
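 *
 * The objects in @p may come from different slabs of the cache; when called
 * with a NULL cache pointer the cache is even derived from each object, so
 * objects from different kmalloc caches (and large kmalloc allocations) can
 * be mixed. build_detached_freelist() above gathers each run of objects
 * sharing a slab, with a small look ahead, so that every such group is
 * returned with a single freeing operation.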
*/ 3917 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 3918 { 3919 if (!size) 3920 return; 3921 3922 do { 3923 struct detached_freelist df; 3924 3925 size = build_detached_freelist(s, size, p, &df); 3926 if (!df.slab) 3927 continue; 3928 3929 slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt, 3930 _RET_IP_); 3931 } while (likely(size)); 3932 } 3933 EXPORT_SYMBOL(kmem_cache_free_bulk); 3934 3935 #ifndef CONFIG_SLUB_TINY 3936 static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 3937 size_t size, void **p, struct obj_cgroup *objcg) 3938 { 3939 struct kmem_cache_cpu *c; 3940 unsigned long irqflags; 3941 int i; 3942 3943 /* 3944 * Drain objects in the per cpu slab, while disabling local 3945 * IRQs, which protects against PREEMPT and interrupts 3946 * handlers invoking normal fastpath. 3947 */ 3948 c = slub_get_cpu_ptr(s->cpu_slab); 3949 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 3950 3951 for (i = 0; i < size; i++) { 3952 void *object = kfence_alloc(s, s->object_size, flags); 3953 3954 if (unlikely(object)) { 3955 p[i] = object; 3956 continue; 3957 } 3958 3959 object = c->freelist; 3960 if (unlikely(!object)) { 3961 /* 3962 * We may have removed an object from c->freelist using 3963 * the fastpath in the previous iteration; in that case, 3964 * c->tid has not been bumped yet. 3965 * Since ___slab_alloc() may reenable interrupts while 3966 * allocating memory, we should bump c->tid now. 3967 */ 3968 c->tid = next_tid(c->tid); 3969 3970 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 3971 3972 /* 3973 * Invoking slow path likely have side-effect 3974 * of re-populating per CPU c->freelist 3975 */ 3976 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 3977 _RET_IP_, c, s->object_size); 3978 if (unlikely(!p[i])) 3979 goto error; 3980 3981 c = this_cpu_ptr(s->cpu_slab); 3982 maybe_wipe_obj_freeptr(s, p[i]); 3983 3984 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 3985 3986 continue; /* goto for-loop */ 3987 } 3988 c->freelist = get_freepointer(s, object); 3989 p[i] = object; 3990 maybe_wipe_obj_freeptr(s, p[i]); 3991 } 3992 c->tid = next_tid(c->tid); 3993 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 3994 slub_put_cpu_ptr(s->cpu_slab); 3995 3996 return i; 3997 3998 error: 3999 slub_put_cpu_ptr(s->cpu_slab); 4000 slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size); 4001 kmem_cache_free_bulk(s, i, p); 4002 return 0; 4003 4004 } 4005 #else /* CONFIG_SLUB_TINY */ 4006 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 4007 size_t size, void **p, struct obj_cgroup *objcg) 4008 { 4009 int i; 4010 4011 for (i = 0; i < size; i++) { 4012 void *object = kfence_alloc(s, s->object_size, flags); 4013 4014 if (unlikely(object)) { 4015 p[i] = object; 4016 continue; 4017 } 4018 4019 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, 4020 _RET_IP_, s->object_size); 4021 if (unlikely(!p[i])) 4022 goto error; 4023 4024 maybe_wipe_obj_freeptr(s, p[i]); 4025 } 4026 4027 return i; 4028 4029 error: 4030 slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size); 4031 kmem_cache_free_bulk(s, i, p); 4032 return 0; 4033 } 4034 #endif /* CONFIG_SLUB_TINY */ 4035 4036 /* Note that interrupts must be enabled when calling this function. 
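 *
 * Caller-side sketch (hypothetical cache and batch size, error handling
 * trimmed):
 *
 *	void *objs[16];
 *	int n;
 *
 *	n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!n)
 *		return -ENOMEM;		(0 means nothing was allocated)
 *	...
 *	kmem_cache_free_bulk(my_cache, n, objs);
 *
 * Both calls must run with interrupts enabled; on success the allocation
 * returns the full count requested, never a partial batch.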
*/ 4037 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 4038 void **p) 4039 { 4040 int i; 4041 struct obj_cgroup *objcg = NULL; 4042 4043 if (!size) 4044 return 0; 4045 4046 /* memcg and kmem_cache debug support */ 4047 s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags); 4048 if (unlikely(!s)) 4049 return 0; 4050 4051 i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg); 4052 4053 /* 4054 * memcg and kmem_cache debug support and memory initialization. 4055 * Done outside of the IRQ disabled fastpath loop. 4056 */ 4057 if (i != 0) 4058 slab_post_alloc_hook(s, objcg, flags, size, p, 4059 slab_want_init_on_alloc(flags, s), s->object_size); 4060 return i; 4061 } 4062 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 4063 4064 4065 /* 4066 * Object placement in a slab is made very easy because we always start at 4067 * offset 0. If we tune the size of the object to the alignment then we can 4068 * get the required alignment by putting one properly sized object after 4069 * another. 4070 * 4071 * Notice that the allocation order determines the sizes of the per cpu 4072 * caches. Each processor has always one slab available for allocations. 4073 * Increasing the allocation order reduces the number of times that slabs 4074 * must be moved on and off the partial lists and is therefore a factor in 4075 * locking overhead. 4076 */ 4077 4078 /* 4079 * Minimum / Maximum order of slab pages. This influences locking overhead 4080 * and slab fragmentation. A higher order reduces the number of partial slabs 4081 * and increases the number of allocations possible without having to 4082 * take the list_lock. 4083 */ 4084 static unsigned int slub_min_order; 4085 static unsigned int slub_max_order = 4086 IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER; 4087 static unsigned int slub_min_objects; 4088 4089 /* 4090 * Calculate the order of allocation given an slab object size. 4091 * 4092 * The order of allocation has significant impact on performance and other 4093 * system components. Generally order 0 allocations should be preferred since 4094 * order 0 does not cause fragmentation in the page allocator. Larger objects 4095 * be problematic to put into order 0 slabs because there may be too much 4096 * unused space left. We go to a higher order if more than 1/16th of the slab 4097 * would be wasted. 4098 * 4099 * In order to reach satisfactory performance we must ensure that a minimum 4100 * number of objects is in one slab. Otherwise we may generate too much 4101 * activity on the partial lists which requires taking the list_lock. This is 4102 * less a concern for large slabs though which are rarely used. 4103 * 4104 * slub_max_order specifies the order where we begin to stop considering the 4105 * number of objects in a slab as critical. If we reach slub_max_order then 4106 * we try to keep the page order as low as possible. So we accept more waste 4107 * of space in favor of a small page order. 4108 * 4109 * Higher order allocations also allow the placement of more objects in a 4110 * slab and thereby reduce object handling overhead. If the user has 4111 * requested a higher minimum order then we start with that one instead of 4112 * the smallest order which will fit the object. 
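 *
 * Worked example (hypothetical 720-byte objects, 4 KiB pages, min_objects of
 * 1): at order 0 a slab holds 5 objects and wastes 4096 - 5*720 = 496 bytes,
 * which exceeds 4096/16 = 256, so the order-0 layout is rejected; at order 1
 * it holds 11 objects and wastes 8192 - 11*720 = 272 bytes, which is within
 * 8192/16 = 512, so calc_slab_order() settles on order 1.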
4113 */ 4114 static inline unsigned int calc_slab_order(unsigned int size, 4115 unsigned int min_objects, unsigned int max_order, 4116 unsigned int fract_leftover) 4117 { 4118 unsigned int min_order = slub_min_order; 4119 unsigned int order; 4120 4121 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 4122 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 4123 4124 for (order = max(min_order, (unsigned int)get_order(min_objects * size)); 4125 order <= max_order; order++) { 4126 4127 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 4128 unsigned int rem; 4129 4130 rem = slab_size % size; 4131 4132 if (rem <= slab_size / fract_leftover) 4133 break; 4134 } 4135 4136 return order; 4137 } 4138 4139 static inline int calculate_order(unsigned int size) 4140 { 4141 unsigned int order; 4142 unsigned int min_objects; 4143 unsigned int max_objects; 4144 unsigned int nr_cpus; 4145 4146 /* 4147 * Attempt to find best configuration for a slab. This 4148 * works by first attempting to generate a layout with 4149 * the best configuration and backing off gradually. 4150 * 4151 * First we increase the acceptable waste in a slab. Then 4152 * we reduce the minimum objects required in a slab. 4153 */ 4154 min_objects = slub_min_objects; 4155 if (!min_objects) { 4156 /* 4157 * Some architectures will only update present cpus when 4158 * onlining them, so don't trust the number if it's just 1. But 4159 * we also don't want to use nr_cpu_ids always, as on some other 4160 * architectures, there can be many possible cpus, but never 4161 * onlined. Here we compromise between trying to avoid too high 4162 * order on systems that appear larger than they are, and too 4163 * low order on systems that appear smaller than they are. 4164 */ 4165 nr_cpus = num_present_cpus(); 4166 if (nr_cpus <= 1) 4167 nr_cpus = nr_cpu_ids; 4168 min_objects = 4 * (fls(nr_cpus) + 1); 4169 } 4170 max_objects = order_objects(slub_max_order, size); 4171 min_objects = min(min_objects, max_objects); 4172 4173 while (min_objects > 1) { 4174 unsigned int fraction; 4175 4176 fraction = 16; 4177 while (fraction >= 4) { 4178 order = calc_slab_order(size, min_objects, 4179 slub_max_order, fraction); 4180 if (order <= slub_max_order) 4181 return order; 4182 fraction /= 2; 4183 } 4184 min_objects--; 4185 } 4186 4187 /* 4188 * We were unable to place multiple objects in a slab. Now 4189 * lets see if we can place a single object there. 4190 */ 4191 order = calc_slab_order(size, 1, slub_max_order, 1); 4192 if (order <= slub_max_order) 4193 return order; 4194 4195 /* 4196 * Doh this slab cannot be placed using slub_max_order. 4197 */ 4198 order = calc_slab_order(size, 1, MAX_ORDER, 1); 4199 if (order <= MAX_ORDER) 4200 return order; 4201 return -ENOSYS; 4202 } 4203 4204 static void 4205 init_kmem_cache_node(struct kmem_cache_node *n) 4206 { 4207 n->nr_partial = 0; 4208 spin_lock_init(&n->list_lock); 4209 INIT_LIST_HEAD(&n->partial); 4210 #ifdef CONFIG_SLUB_DEBUG 4211 atomic_long_set(&n->nr_slabs, 0); 4212 atomic_long_set(&n->total_objects, 0); 4213 INIT_LIST_HEAD(&n->full); 4214 #endif 4215 } 4216 4217 #ifndef CONFIG_SLUB_TINY 4218 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 4219 { 4220 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 4221 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * 4222 sizeof(struct kmem_cache_cpu)); 4223 4224 /* 4225 * Must align to double word boundary for the double cmpxchg 4226 * instructions to work; see __pcpu_double_call_return_bool(). 
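 *
 * Rough picture of why (simplified sketch, not the real declaration): the
 * fastpath updates the freelist pointer and the transaction id as one unit,
 * i.e. something shaped like
 *
 *	struct { void *freelist; unsigned long tid; } __aligned(2 * sizeof(void *));
 *
 * so that a double-word cmpxchg (e.g. cmpxchg16b on x86-64) can replace both
 * fields atomically. That instruction requires its operand to be aligned to
 * twice the word size, which is the alignment passed to __alloc_percpu()
 * below.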
4227 */ 4228 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 4229 2 * sizeof(void *)); 4230 4231 if (!s->cpu_slab) 4232 return 0; 4233 4234 init_kmem_cache_cpus(s); 4235 4236 return 1; 4237 } 4238 #else 4239 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 4240 { 4241 return 1; 4242 } 4243 #endif /* CONFIG_SLUB_TINY */ 4244 4245 static struct kmem_cache *kmem_cache_node; 4246 4247 /* 4248 * No kmalloc_node yet so do it by hand. We know that this is the first 4249 * slab on the node for this slabcache. There are no concurrent accesses 4250 * possible. 4251 * 4252 * Note that this function only works on the kmem_cache_node 4253 * when allocating for the kmem_cache_node. This is used for bootstrapping 4254 * memory on a fresh node that has no slab structures yet. 4255 */ 4256 static void early_kmem_cache_node_alloc(int node) 4257 { 4258 struct slab *slab; 4259 struct kmem_cache_node *n; 4260 4261 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 4262 4263 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 4264 4265 BUG_ON(!slab); 4266 inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects); 4267 if (slab_nid(slab) != node) { 4268 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 4269 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 4270 } 4271 4272 n = slab->freelist; 4273 BUG_ON(!n); 4274 #ifdef CONFIG_SLUB_DEBUG 4275 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 4276 init_tracking(kmem_cache_node, n); 4277 #endif 4278 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 4279 slab->freelist = get_freepointer(kmem_cache_node, n); 4280 slab->inuse = 1; 4281 kmem_cache_node->node[node] = n; 4282 init_kmem_cache_node(n); 4283 inc_slabs_node(kmem_cache_node, node, slab->objects); 4284 4285 /* 4286 * No locks need to be taken here as it has just been 4287 * initialized and there is no concurrent access. 4288 */ 4289 __add_partial(n, slab, DEACTIVATE_TO_HEAD); 4290 } 4291 4292 static void free_kmem_cache_nodes(struct kmem_cache *s) 4293 { 4294 int node; 4295 struct kmem_cache_node *n; 4296 4297 for_each_kmem_cache_node(s, node, n) { 4298 s->node[node] = NULL; 4299 kmem_cache_free(kmem_cache_node, n); 4300 } 4301 } 4302 4303 void __kmem_cache_release(struct kmem_cache *s) 4304 { 4305 cache_random_seq_destroy(s); 4306 #ifndef CONFIG_SLUB_TINY 4307 free_percpu(s->cpu_slab); 4308 #endif 4309 free_kmem_cache_nodes(s); 4310 } 4311 4312 static int init_kmem_cache_nodes(struct kmem_cache *s) 4313 { 4314 int node; 4315 4316 for_each_node_mask(node, slab_nodes) { 4317 struct kmem_cache_node *n; 4318 4319 if (slab_state == DOWN) { 4320 early_kmem_cache_node_alloc(node); 4321 continue; 4322 } 4323 n = kmem_cache_alloc_node(kmem_cache_node, 4324 GFP_KERNEL, node); 4325 4326 if (!n) { 4327 free_kmem_cache_nodes(s); 4328 return 0; 4329 } 4330 4331 init_kmem_cache_node(n); 4332 s->node[node] = n; 4333 } 4334 return 1; 4335 } 4336 4337 static void set_cpu_partial(struct kmem_cache *s) 4338 { 4339 #ifdef CONFIG_SLUB_CPU_PARTIAL 4340 unsigned int nr_objects; 4341 4342 /* 4343 * cpu_partial determined the maximum number of objects kept in the 4344 * per cpu partial lists of a processor. 4345 * 4346 * Per cpu partial lists mainly contain slabs that just have one 4347 * object freed. If they are used for allocation then they can be 4348 * filled up again with minimal effort. The slab will never hit the 4349 * per node partial lists and therefore no locking will be required. 
4350 * 4351 * For backwards compatibility reasons, this is determined as number 4352 * of objects, even though we now limit maximum number of pages, see 4353 * slub_set_cpu_partial() 4354 */ 4355 if (!kmem_cache_has_cpu_partial(s)) 4356 nr_objects = 0; 4357 else if (s->size >= PAGE_SIZE) 4358 nr_objects = 6; 4359 else if (s->size >= 1024) 4360 nr_objects = 24; 4361 else if (s->size >= 256) 4362 nr_objects = 52; 4363 else 4364 nr_objects = 120; 4365 4366 slub_set_cpu_partial(s, nr_objects); 4367 #endif 4368 } 4369 4370 /* 4371 * calculate_sizes() determines the order and the distribution of data within 4372 * a slab object. 4373 */ 4374 static int calculate_sizes(struct kmem_cache *s) 4375 { 4376 slab_flags_t flags = s->flags; 4377 unsigned int size = s->object_size; 4378 unsigned int order; 4379 4380 /* 4381 * Round up object size to the next word boundary. We can only 4382 * place the free pointer at word boundaries and this determines 4383 * the possible location of the free pointer. 4384 */ 4385 size = ALIGN(size, sizeof(void *)); 4386 4387 #ifdef CONFIG_SLUB_DEBUG 4388 /* 4389 * Determine if we can poison the object itself. If the user of 4390 * the slab may touch the object after free or before allocation 4391 * then we should never poison the object itself. 4392 */ 4393 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 4394 !s->ctor) 4395 s->flags |= __OBJECT_POISON; 4396 else 4397 s->flags &= ~__OBJECT_POISON; 4398 4399 4400 /* 4401 * If we are Redzoning then check if there is some space between the 4402 * end of the object and the free pointer. If not then add an 4403 * additional word to have some bytes to store Redzone information. 4404 */ 4405 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 4406 size += sizeof(void *); 4407 #endif 4408 4409 /* 4410 * With that we have determined the number of bytes in actual use 4411 * by the object and redzoning. 4412 */ 4413 s->inuse = size; 4414 4415 if (slub_debug_orig_size(s) || 4416 (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 4417 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || 4418 s->ctor) { 4419 /* 4420 * Relocate free pointer after the object if it is not 4421 * permitted to overwrite the first word of the object on 4422 * kmem_cache_free. 4423 * 4424 * This is the case if we do RCU, have a constructor or 4425 * destructor, are poisoning the objects, or are 4426 * redzoning an object smaller than sizeof(void *). 4427 * 4428 * The assumption that s->offset >= s->inuse means free 4429 * pointer is outside of the object is used in the 4430 * freeptr_outside_object() function. If that is no 4431 * longer true, the function needs to be modified. 4432 */ 4433 s->offset = size; 4434 size += sizeof(void *); 4435 } else { 4436 /* 4437 * Store freelist pointer near middle of object to keep 4438 * it away from the edges of the object to avoid small 4439 * sized over/underflows from neighboring allocations. 4440 */ 4441 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 4442 } 4443 4444 #ifdef CONFIG_SLUB_DEBUG 4445 if (flags & SLAB_STORE_USER) { 4446 /* 4447 * Need to store information about allocs and frees after 4448 * the object. 
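 *
 * Rough per-object layout with full debugging enabled (simplified sketch;
 * field order as accumulated by this function, widths not to scale):
 *
 *	| red_left_pad | object_size bytes | right redzone / pad to s->inuse |
 *	| free pointer at s->offset (when moved outside the object)         |
 *	| track[ALLOC] | track[FREE] | orig_size (kmalloc only) | align pad  |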
4449 */ 4450 size += 2 * sizeof(struct track); 4451 4452 /* Save the original kmalloc request size */ 4453 if (flags & SLAB_KMALLOC) 4454 size += sizeof(unsigned int); 4455 } 4456 #endif 4457 4458 kasan_cache_create(s, &size, &s->flags); 4459 #ifdef CONFIG_SLUB_DEBUG 4460 if (flags & SLAB_RED_ZONE) { 4461 /* 4462 * Add some empty padding so that we can catch 4463 * overwrites from earlier objects rather than let 4464 * tracking information or the free pointer be 4465 * corrupted if a user writes before the start 4466 * of the object. 4467 */ 4468 size += sizeof(void *); 4469 4470 s->red_left_pad = sizeof(void *); 4471 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 4472 size += s->red_left_pad; 4473 } 4474 #endif 4475 4476 /* 4477 * SLUB stores one object immediately after another beginning from 4478 * offset 0. In order to align the objects we have to simply size 4479 * each object to conform to the alignment. 4480 */ 4481 size = ALIGN(size, s->align); 4482 s->size = size; 4483 s->reciprocal_size = reciprocal_value(size); 4484 order = calculate_order(size); 4485 4486 if ((int)order < 0) 4487 return 0; 4488 4489 s->allocflags = 0; 4490 if (order) 4491 s->allocflags |= __GFP_COMP; 4492 4493 if (s->flags & SLAB_CACHE_DMA) 4494 s->allocflags |= GFP_DMA; 4495 4496 if (s->flags & SLAB_CACHE_DMA32) 4497 s->allocflags |= GFP_DMA32; 4498 4499 if (s->flags & SLAB_RECLAIM_ACCOUNT) 4500 s->allocflags |= __GFP_RECLAIMABLE; 4501 4502 /* 4503 * Determine the number of objects per slab 4504 */ 4505 s->oo = oo_make(order, size); 4506 s->min = oo_make(get_order(size), size); 4507 4508 return !!oo_objects(s->oo); 4509 } 4510 4511 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 4512 { 4513 s->flags = kmem_cache_flags(s->size, flags, s->name); 4514 #ifdef CONFIG_SLAB_FREELIST_HARDENED 4515 s->random = get_random_long(); 4516 #endif 4517 4518 if (!calculate_sizes(s)) 4519 goto error; 4520 if (disable_higher_order_debug) { 4521 /* 4522 * Disable debugging flags that store metadata if the min slab 4523 * order increased. 4524 */ 4525 if (get_order(s->size) > get_order(s->object_size)) { 4526 s->flags &= ~DEBUG_METADATA_FLAGS; 4527 s->offset = 0; 4528 if (!calculate_sizes(s)) 4529 goto error; 4530 } 4531 } 4532 4533 #ifdef system_has_freelist_aba 4534 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { 4535 /* Enable fast mode */ 4536 s->flags |= __CMPXCHG_DOUBLE; 4537 } 4538 #endif 4539 4540 /* 4541 * The larger the object size is, the more slabs we want on the partial 4542 * list to avoid pounding the page allocator excessively. 
4543 */ 4544 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 4545 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 4546 4547 set_cpu_partial(s); 4548 4549 #ifdef CONFIG_NUMA 4550 s->remote_node_defrag_ratio = 1000; 4551 #endif 4552 4553 /* Initialize the pre-computed randomized freelist if slab is up */ 4554 if (slab_state >= UP) { 4555 if (init_cache_random_seq(s)) 4556 goto error; 4557 } 4558 4559 if (!init_kmem_cache_nodes(s)) 4560 goto error; 4561 4562 if (alloc_kmem_cache_cpus(s)) 4563 return 0; 4564 4565 error: 4566 __kmem_cache_release(s); 4567 return -EINVAL; 4568 } 4569 4570 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, 4571 const char *text) 4572 { 4573 #ifdef CONFIG_SLUB_DEBUG 4574 void *addr = slab_address(slab); 4575 void *p; 4576 4577 slab_err(s, slab, text, s->name); 4578 4579 spin_lock(&object_map_lock); 4580 __fill_map(object_map, s, slab); 4581 4582 for_each_object(p, s, addr, slab->objects) { 4583 4584 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { 4585 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 4586 print_tracking(s, p); 4587 } 4588 } 4589 spin_unlock(&object_map_lock); 4590 #endif 4591 } 4592 4593 /* 4594 * Attempt to free all partial slabs on a node. 4595 * This is called from __kmem_cache_shutdown(). We must take list_lock 4596 * because sysfs file might still access partial list after the shutdowning. 4597 */ 4598 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 4599 { 4600 LIST_HEAD(discard); 4601 struct slab *slab, *h; 4602 4603 BUG_ON(irqs_disabled()); 4604 spin_lock_irq(&n->list_lock); 4605 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 4606 if (!slab->inuse) { 4607 remove_partial(n, slab); 4608 list_add(&slab->slab_list, &discard); 4609 } else { 4610 list_slab_objects(s, slab, 4611 "Objects remaining in %s on __kmem_cache_shutdown()"); 4612 } 4613 } 4614 spin_unlock_irq(&n->list_lock); 4615 4616 list_for_each_entry_safe(slab, h, &discard, slab_list) 4617 discard_slab(s, slab); 4618 } 4619 4620 bool __kmem_cache_empty(struct kmem_cache *s) 4621 { 4622 int node; 4623 struct kmem_cache_node *n; 4624 4625 for_each_kmem_cache_node(s, node, n) 4626 if (n->nr_partial || slabs_node(s, node)) 4627 return false; 4628 return true; 4629 } 4630 4631 /* 4632 * Release all resources used by a slab cache. 
4633 */ 4634 int __kmem_cache_shutdown(struct kmem_cache *s) 4635 { 4636 int node; 4637 struct kmem_cache_node *n; 4638 4639 flush_all_cpus_locked(s); 4640 /* Attempt to free all objects */ 4641 for_each_kmem_cache_node(s, node, n) { 4642 free_partial(s, n); 4643 if (n->nr_partial || slabs_node(s, node)) 4644 return 1; 4645 } 4646 return 0; 4647 } 4648 4649 #ifdef CONFIG_PRINTK 4650 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 4651 { 4652 void *base; 4653 int __maybe_unused i; 4654 unsigned int objnr; 4655 void *objp; 4656 void *objp0; 4657 struct kmem_cache *s = slab->slab_cache; 4658 struct track __maybe_unused *trackp; 4659 4660 kpp->kp_ptr = object; 4661 kpp->kp_slab = slab; 4662 kpp->kp_slab_cache = s; 4663 base = slab_address(slab); 4664 objp0 = kasan_reset_tag(object); 4665 #ifdef CONFIG_SLUB_DEBUG 4666 objp = restore_red_left(s, objp0); 4667 #else 4668 objp = objp0; 4669 #endif 4670 objnr = obj_to_index(s, slab, objp); 4671 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 4672 objp = base + s->size * objnr; 4673 kpp->kp_objp = objp; 4674 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 4675 || (objp - base) % s->size) || 4676 !(s->flags & SLAB_STORE_USER)) 4677 return; 4678 #ifdef CONFIG_SLUB_DEBUG 4679 objp = fixup_red_left(s, objp); 4680 trackp = get_track(s, objp, TRACK_ALLOC); 4681 kpp->kp_ret = (void *)trackp->addr; 4682 #ifdef CONFIG_STACKDEPOT 4683 { 4684 depot_stack_handle_t handle; 4685 unsigned long *entries; 4686 unsigned int nr_entries; 4687 4688 handle = READ_ONCE(trackp->handle); 4689 if (handle) { 4690 nr_entries = stack_depot_fetch(handle, &entries); 4691 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 4692 kpp->kp_stack[i] = (void *)entries[i]; 4693 } 4694 4695 trackp = get_track(s, objp, TRACK_FREE); 4696 handle = READ_ONCE(trackp->handle); 4697 if (handle) { 4698 nr_entries = stack_depot_fetch(handle, &entries); 4699 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 4700 kpp->kp_free_stack[i] = (void *)entries[i]; 4701 } 4702 } 4703 #endif 4704 #endif 4705 } 4706 #endif 4707 4708 /******************************************************************** 4709 * Kmalloc subsystem 4710 *******************************************************************/ 4711 4712 static int __init setup_slub_min_order(char *str) 4713 { 4714 get_option(&str, (int *)&slub_min_order); 4715 4716 return 1; 4717 } 4718 4719 __setup("slub_min_order=", setup_slub_min_order); 4720 4721 static int __init setup_slub_max_order(char *str) 4722 { 4723 get_option(&str, (int *)&slub_max_order); 4724 slub_max_order = min_t(unsigned int, slub_max_order, MAX_ORDER); 4725 4726 return 1; 4727 } 4728 4729 __setup("slub_max_order=", setup_slub_max_order); 4730 4731 static int __init setup_slub_min_objects(char *str) 4732 { 4733 get_option(&str, (int *)&slub_min_objects); 4734 4735 return 1; 4736 } 4737 4738 __setup("slub_min_objects=", setup_slub_min_objects); 4739 4740 #ifdef CONFIG_HARDENED_USERCOPY 4741 /* 4742 * Rejects incorrectly sized objects and objects that are to be copied 4743 * to/from userspace but do not fall entirely within the containing slab 4744 * cache's usercopy region. 4745 * 4746 * Returns NULL if check passes, otherwise const char * to name of cache 4747 * to indicate an error. 
4748 */ 4749 void __check_heap_object(const void *ptr, unsigned long n, 4750 const struct slab *slab, bool to_user) 4751 { 4752 struct kmem_cache *s; 4753 unsigned int offset; 4754 bool is_kfence = is_kfence_address(ptr); 4755 4756 ptr = kasan_reset_tag(ptr); 4757 4758 /* Find object and usable object size. */ 4759 s = slab->slab_cache; 4760 4761 /* Reject impossible pointers. */ 4762 if (ptr < slab_address(slab)) 4763 usercopy_abort("SLUB object not in SLUB page?!", NULL, 4764 to_user, 0, n); 4765 4766 /* Find offset within object. */ 4767 if (is_kfence) 4768 offset = ptr - kfence_object_start(ptr); 4769 else 4770 offset = (ptr - slab_address(slab)) % s->size; 4771 4772 /* Adjust for redzone and reject if within the redzone. */ 4773 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 4774 if (offset < s->red_left_pad) 4775 usercopy_abort("SLUB object in left red zone", 4776 s->name, to_user, offset, n); 4777 offset -= s->red_left_pad; 4778 } 4779 4780 /* Allow address range falling entirely within usercopy region. */ 4781 if (offset >= s->useroffset && 4782 offset - s->useroffset <= s->usersize && 4783 n <= s->useroffset - offset + s->usersize) 4784 return; 4785 4786 usercopy_abort("SLUB object", s->name, to_user, offset, n); 4787 } 4788 #endif /* CONFIG_HARDENED_USERCOPY */ 4789 4790 #define SHRINK_PROMOTE_MAX 32 4791 4792 /* 4793 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 4794 * up most to the head of the partial lists. New allocations will then 4795 * fill those up and thus they can be removed from the partial lists. 4796 * 4797 * The slabs with the least items are placed last. This results in them 4798 * being allocated from last increasing the chance that the last objects 4799 * are freed in them. 4800 */ 4801 static int __kmem_cache_do_shrink(struct kmem_cache *s) 4802 { 4803 int node; 4804 int i; 4805 struct kmem_cache_node *n; 4806 struct slab *slab; 4807 struct slab *t; 4808 struct list_head discard; 4809 struct list_head promote[SHRINK_PROMOTE_MAX]; 4810 unsigned long flags; 4811 int ret = 0; 4812 4813 for_each_kmem_cache_node(s, node, n) { 4814 INIT_LIST_HEAD(&discard); 4815 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 4816 INIT_LIST_HEAD(promote + i); 4817 4818 spin_lock_irqsave(&n->list_lock, flags); 4819 4820 /* 4821 * Build lists of slabs to discard or promote. 4822 * 4823 * Note that concurrent frees may occur while we hold the 4824 * list_lock. slab->inuse here is the upper limit. 4825 */ 4826 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 4827 int free = slab->objects - slab->inuse; 4828 4829 /* Do not reread slab->inuse */ 4830 barrier(); 4831 4832 /* We do not keep full slabs on the list */ 4833 BUG_ON(free <= 0); 4834 4835 if (free == slab->objects) { 4836 list_move(&slab->slab_list, &discard); 4837 n->nr_partial--; 4838 dec_slabs_node(s, node, slab->objects); 4839 } else if (free <= SHRINK_PROMOTE_MAX) 4840 list_move(&slab->slab_list, promote + free - 1); 4841 } 4842 4843 /* 4844 * Promote the slabs filled up most to the head of the 4845 * partial list. 
4846 */ 4847 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 4848 list_splice(promote + i, &n->partial); 4849 4850 spin_unlock_irqrestore(&n->list_lock, flags); 4851 4852 /* Release empty slabs */ 4853 list_for_each_entry_safe(slab, t, &discard, slab_list) 4854 free_slab(s, slab); 4855 4856 if (slabs_node(s, node)) 4857 ret = 1; 4858 } 4859 4860 return ret; 4861 } 4862 4863 int __kmem_cache_shrink(struct kmem_cache *s) 4864 { 4865 flush_all(s); 4866 return __kmem_cache_do_shrink(s); 4867 } 4868 4869 static int slab_mem_going_offline_callback(void *arg) 4870 { 4871 struct kmem_cache *s; 4872 4873 mutex_lock(&slab_mutex); 4874 list_for_each_entry(s, &slab_caches, list) { 4875 flush_all_cpus_locked(s); 4876 __kmem_cache_do_shrink(s); 4877 } 4878 mutex_unlock(&slab_mutex); 4879 4880 return 0; 4881 } 4882 4883 static void slab_mem_offline_callback(void *arg) 4884 { 4885 struct memory_notify *marg = arg; 4886 int offline_node; 4887 4888 offline_node = marg->status_change_nid_normal; 4889 4890 /* 4891 * If the node still has available memory. we need kmem_cache_node 4892 * for it yet. 4893 */ 4894 if (offline_node < 0) 4895 return; 4896 4897 mutex_lock(&slab_mutex); 4898 node_clear(offline_node, slab_nodes); 4899 /* 4900 * We no longer free kmem_cache_node structures here, as it would be 4901 * racy with all get_node() users, and infeasible to protect them with 4902 * slab_mutex. 4903 */ 4904 mutex_unlock(&slab_mutex); 4905 } 4906 4907 static int slab_mem_going_online_callback(void *arg) 4908 { 4909 struct kmem_cache_node *n; 4910 struct kmem_cache *s; 4911 struct memory_notify *marg = arg; 4912 int nid = marg->status_change_nid_normal; 4913 int ret = 0; 4914 4915 /* 4916 * If the node's memory is already available, then kmem_cache_node is 4917 * already created. Nothing to do. 4918 */ 4919 if (nid < 0) 4920 return 0; 4921 4922 /* 4923 * We are bringing a node online. No memory is available yet. We must 4924 * allocate a kmem_cache_node structure in order to bring the node 4925 * online. 4926 */ 4927 mutex_lock(&slab_mutex); 4928 list_for_each_entry(s, &slab_caches, list) { 4929 /* 4930 * The structure may already exist if the node was previously 4931 * onlined and offlined. 4932 */ 4933 if (get_node(s, nid)) 4934 continue; 4935 /* 4936 * XXX: kmem_cache_alloc_node will fallback to other nodes 4937 * since memory is not yet available from the node that 4938 * is brought up. 4939 */ 4940 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 4941 if (!n) { 4942 ret = -ENOMEM; 4943 goto out; 4944 } 4945 init_kmem_cache_node(n); 4946 s->node[nid] = n; 4947 } 4948 /* 4949 * Any cache created after this point will also have kmem_cache_node 4950 * initialized for the new node. 
4951 */ 4952 node_set(nid, slab_nodes); 4953 out: 4954 mutex_unlock(&slab_mutex); 4955 return ret; 4956 } 4957 4958 static int slab_memory_callback(struct notifier_block *self, 4959 unsigned long action, void *arg) 4960 { 4961 int ret = 0; 4962 4963 switch (action) { 4964 case MEM_GOING_ONLINE: 4965 ret = slab_mem_going_online_callback(arg); 4966 break; 4967 case MEM_GOING_OFFLINE: 4968 ret = slab_mem_going_offline_callback(arg); 4969 break; 4970 case MEM_OFFLINE: 4971 case MEM_CANCEL_ONLINE: 4972 slab_mem_offline_callback(arg); 4973 break; 4974 case MEM_ONLINE: 4975 case MEM_CANCEL_OFFLINE: 4976 break; 4977 } 4978 if (ret) 4979 ret = notifier_from_errno(ret); 4980 else 4981 ret = NOTIFY_OK; 4982 return ret; 4983 } 4984 4985 /******************************************************************** 4986 * Basic setup of slabs 4987 *******************************************************************/ 4988 4989 /* 4990 * Used for early kmem_cache structures that were allocated using 4991 * the page allocator. Allocate them properly then fix up the pointers 4992 * that may be pointing to the wrong kmem_cache structure. 4993 */ 4994 4995 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 4996 { 4997 int node; 4998 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 4999 struct kmem_cache_node *n; 5000 5001 memcpy(s, static_cache, kmem_cache->object_size); 5002 5003 /* 5004 * This runs very early, and only the boot processor is supposed to be 5005 * up. Even if it weren't true, IRQs are not up so we couldn't fire 5006 * IPIs around. 5007 */ 5008 __flush_cpu_slab(s, smp_processor_id()); 5009 for_each_kmem_cache_node(s, node, n) { 5010 struct slab *p; 5011 5012 list_for_each_entry(p, &n->partial, slab_list) 5013 p->slab_cache = s; 5014 5015 #ifdef CONFIG_SLUB_DEBUG 5016 list_for_each_entry(p, &n->full, slab_list) 5017 p->slab_cache = s; 5018 #endif 5019 } 5020 list_add(&s->list, &slab_caches); 5021 return s; 5022 } 5023 5024 void __init kmem_cache_init(void) 5025 { 5026 static __initdata struct kmem_cache boot_kmem_cache, 5027 boot_kmem_cache_node; 5028 int node; 5029 5030 if (debug_guardpage_minorder()) 5031 slub_max_order = 0; 5032 5033 /* Print slub debugging pointers without hashing */ 5034 if (__slub_debug_enabled()) 5035 no_hash_pointers_enable(NULL); 5036 5037 kmem_cache_node = &boot_kmem_cache_node; 5038 kmem_cache = &boot_kmem_cache; 5039 5040 /* 5041 * Initialize the nodemask for which we will allocate per node 5042 * structures. Here we don't need taking slab_mutex yet. 
5043 */ 5044 for_each_node_state(node, N_NORMAL_MEMORY) 5045 node_set(node, slab_nodes); 5046 5047 create_boot_cache(kmem_cache_node, "kmem_cache_node", 5048 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); 5049 5050 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 5051 5052 /* Able to allocate the per node structures */ 5053 slab_state = PARTIAL; 5054 5055 create_boot_cache(kmem_cache, "kmem_cache", 5056 offsetof(struct kmem_cache, node) + 5057 nr_node_ids * sizeof(struct kmem_cache_node *), 5058 SLAB_HWCACHE_ALIGN, 0, 0); 5059 5060 kmem_cache = bootstrap(&boot_kmem_cache); 5061 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 5062 5063 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 5064 setup_kmalloc_cache_index_table(); 5065 create_kmalloc_caches(0); 5066 5067 /* Setup random freelists for each cache */ 5068 init_freelist_randomization(); 5069 5070 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 5071 slub_cpu_dead); 5072 5073 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 5074 cache_line_size(), 5075 slub_min_order, slub_max_order, slub_min_objects, 5076 nr_cpu_ids, nr_node_ids); 5077 } 5078 5079 void __init kmem_cache_init_late(void) 5080 { 5081 #ifndef CONFIG_SLUB_TINY 5082 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); 5083 WARN_ON(!flushwq); 5084 #endif 5085 } 5086 5087 struct kmem_cache * 5088 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 5089 slab_flags_t flags, void (*ctor)(void *)) 5090 { 5091 struct kmem_cache *s; 5092 5093 s = find_mergeable(size, align, flags, name, ctor); 5094 if (s) { 5095 if (sysfs_slab_alias(s, name)) 5096 return NULL; 5097 5098 s->refcount++; 5099 5100 /* 5101 * Adjust the object sizes so that we clear 5102 * the complete object on kzalloc. 5103 */ 5104 s->object_size = max(s->object_size, size); 5105 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 5106 } 5107 5108 return s; 5109 } 5110 5111 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 5112 { 5113 int err; 5114 5115 err = kmem_cache_open(s, flags); 5116 if (err) 5117 return err; 5118 5119 /* Mutex is not taken during early boot */ 5120 if (slab_state <= UP) 5121 return 0; 5122 5123 err = sysfs_slab_add(s); 5124 if (err) { 5125 __kmem_cache_release(s); 5126 return err; 5127 } 5128 5129 if (s->flags & SLAB_STORE_USER) 5130 debugfs_slab_add(s); 5131 5132 return 0; 5133 } 5134 5135 #ifdef SLAB_SUPPORTS_SYSFS 5136 static int count_inuse(struct slab *slab) 5137 { 5138 return slab->inuse; 5139 } 5140 5141 static int count_total(struct slab *slab) 5142 { 5143 return slab->objects; 5144 } 5145 #endif 5146 5147 #ifdef CONFIG_SLUB_DEBUG 5148 static void validate_slab(struct kmem_cache *s, struct slab *slab, 5149 unsigned long *obj_map) 5150 { 5151 void *p; 5152 void *addr = slab_address(slab); 5153 5154 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 5155 return; 5156 5157 /* Now we know that a valid freelist exists */ 5158 __fill_map(obj_map, s, slab); 5159 for_each_object(p, s, addr, slab->objects) { 5160 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 
5161 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 5162 5163 if (!check_object(s, slab, p, val)) 5164 break; 5165 } 5166 } 5167 5168 static int validate_slab_node(struct kmem_cache *s, 5169 struct kmem_cache_node *n, unsigned long *obj_map) 5170 { 5171 unsigned long count = 0; 5172 struct slab *slab; 5173 unsigned long flags; 5174 5175 spin_lock_irqsave(&n->list_lock, flags); 5176 5177 list_for_each_entry(slab, &n->partial, slab_list) { 5178 validate_slab(s, slab, obj_map); 5179 count++; 5180 } 5181 if (count != n->nr_partial) { 5182 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 5183 s->name, count, n->nr_partial); 5184 slab_add_kunit_errors(); 5185 } 5186 5187 if (!(s->flags & SLAB_STORE_USER)) 5188 goto out; 5189 5190 list_for_each_entry(slab, &n->full, slab_list) { 5191 validate_slab(s, slab, obj_map); 5192 count++; 5193 } 5194 if (count != atomic_long_read(&n->nr_slabs)) { 5195 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 5196 s->name, count, atomic_long_read(&n->nr_slabs)); 5197 slab_add_kunit_errors(); 5198 } 5199 5200 out: 5201 spin_unlock_irqrestore(&n->list_lock, flags); 5202 return count; 5203 } 5204 5205 long validate_slab_cache(struct kmem_cache *s) 5206 { 5207 int node; 5208 unsigned long count = 0; 5209 struct kmem_cache_node *n; 5210 unsigned long *obj_map; 5211 5212 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 5213 if (!obj_map) 5214 return -ENOMEM; 5215 5216 flush_all(s); 5217 for_each_kmem_cache_node(s, node, n) 5218 count += validate_slab_node(s, n, obj_map); 5219 5220 bitmap_free(obj_map); 5221 5222 return count; 5223 } 5224 EXPORT_SYMBOL(validate_slab_cache); 5225 5226 #ifdef CONFIG_DEBUG_FS 5227 /* 5228 * Generate lists of code addresses where slabcache objects are allocated 5229 * and freed. 5230 */ 5231 5232 struct location { 5233 depot_stack_handle_t handle; 5234 unsigned long count; 5235 unsigned long addr; 5236 unsigned long waste; 5237 long long sum_time; 5238 long min_time; 5239 long max_time; 5240 long min_pid; 5241 long max_pid; 5242 DECLARE_BITMAP(cpus, NR_CPUS); 5243 nodemask_t nodes; 5244 }; 5245 5246 struct loc_track { 5247 unsigned long max; 5248 unsigned long count; 5249 struct location *loc; 5250 loff_t idx; 5251 }; 5252 5253 static struct dentry *slab_debugfs_root; 5254 5255 static void free_loc_track(struct loc_track *t) 5256 { 5257 if (t->max) 5258 free_pages((unsigned long)t->loc, 5259 get_order(sizeof(struct location) * t->max)); 5260 } 5261 5262 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 5263 { 5264 struct location *l; 5265 int order; 5266 5267 order = get_order(sizeof(struct location) * max); 5268 5269 l = (void *)__get_free_pages(flags, order); 5270 if (!l) 5271 return 0; 5272 5273 if (t->count) { 5274 memcpy(l, t->loc, sizeof(struct location) * t->count); 5275 free_loc_track(t); 5276 } 5277 t->max = max; 5278 t->loc = l; 5279 return 1; 5280 } 5281 5282 static int add_location(struct loc_track *t, struct kmem_cache *s, 5283 const struct track *track, 5284 unsigned int orig_size) 5285 { 5286 long start, end, pos; 5287 struct location *l; 5288 unsigned long caddr, chandle, cwaste; 5289 unsigned long age = jiffies - track->when; 5290 depot_stack_handle_t handle = 0; 5291 unsigned int waste = s->object_size - orig_size; 5292 5293 #ifdef CONFIG_STACKDEPOT 5294 handle = READ_ONCE(track->handle); 5295 #endif 5296 start = -1; 5297 end = t->count; 5298 5299 for ( ; ; ) { 5300 pos = start + (end - start + 1) / 2; 5301 5302 /* 5303 * There is nothing at "end". 
If we end up there 5304 * we need to add something to before end. 5305 */ 5306 if (pos == end) 5307 break; 5308 5309 l = &t->loc[pos]; 5310 caddr = l->addr; 5311 chandle = l->handle; 5312 cwaste = l->waste; 5313 if ((track->addr == caddr) && (handle == chandle) && 5314 (waste == cwaste)) { 5315 5316 l->count++; 5317 if (track->when) { 5318 l->sum_time += age; 5319 if (age < l->min_time) 5320 l->min_time = age; 5321 if (age > l->max_time) 5322 l->max_time = age; 5323 5324 if (track->pid < l->min_pid) 5325 l->min_pid = track->pid; 5326 if (track->pid > l->max_pid) 5327 l->max_pid = track->pid; 5328 5329 cpumask_set_cpu(track->cpu, 5330 to_cpumask(l->cpus)); 5331 } 5332 node_set(page_to_nid(virt_to_page(track)), l->nodes); 5333 return 1; 5334 } 5335 5336 if (track->addr < caddr) 5337 end = pos; 5338 else if (track->addr == caddr && handle < chandle) 5339 end = pos; 5340 else if (track->addr == caddr && handle == chandle && 5341 waste < cwaste) 5342 end = pos; 5343 else 5344 start = pos; 5345 } 5346 5347 /* 5348 * Not found. Insert new tracking element. 5349 */ 5350 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 5351 return 0; 5352 5353 l = t->loc + pos; 5354 if (pos < t->count) 5355 memmove(l + 1, l, 5356 (t->count - pos) * sizeof(struct location)); 5357 t->count++; 5358 l->count = 1; 5359 l->addr = track->addr; 5360 l->sum_time = age; 5361 l->min_time = age; 5362 l->max_time = age; 5363 l->min_pid = track->pid; 5364 l->max_pid = track->pid; 5365 l->handle = handle; 5366 l->waste = waste; 5367 cpumask_clear(to_cpumask(l->cpus)); 5368 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 5369 nodes_clear(l->nodes); 5370 node_set(page_to_nid(virt_to_page(track)), l->nodes); 5371 return 1; 5372 } 5373 5374 static void process_slab(struct loc_track *t, struct kmem_cache *s, 5375 struct slab *slab, enum track_item alloc, 5376 unsigned long *obj_map) 5377 { 5378 void *addr = slab_address(slab); 5379 bool is_alloc = (alloc == TRACK_ALLOC); 5380 void *p; 5381 5382 __fill_map(obj_map, s, slab); 5383 5384 for_each_object(p, s, addr, slab->objects) 5385 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 5386 add_location(t, s, get_track(s, p, alloc), 5387 is_alloc ? 
get_orig_size(s, p) : 5388 s->object_size); 5389 } 5390 #endif /* CONFIG_DEBUG_FS */ 5391 #endif /* CONFIG_SLUB_DEBUG */ 5392 5393 #ifdef SLAB_SUPPORTS_SYSFS 5394 enum slab_stat_type { 5395 SL_ALL, /* All slabs */ 5396 SL_PARTIAL, /* Only partially allocated slabs */ 5397 SL_CPU, /* Only slabs used for cpu caches */ 5398 SL_OBJECTS, /* Determine allocated objects not slabs */ 5399 SL_TOTAL /* Determine object capacity not slabs */ 5400 }; 5401 5402 #define SO_ALL (1 << SL_ALL) 5403 #define SO_PARTIAL (1 << SL_PARTIAL) 5404 #define SO_CPU (1 << SL_CPU) 5405 #define SO_OBJECTS (1 << SL_OBJECTS) 5406 #define SO_TOTAL (1 << SL_TOTAL) 5407 5408 static ssize_t show_slab_objects(struct kmem_cache *s, 5409 char *buf, unsigned long flags) 5410 { 5411 unsigned long total = 0; 5412 int node; 5413 int x; 5414 unsigned long *nodes; 5415 int len = 0; 5416 5417 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 5418 if (!nodes) 5419 return -ENOMEM; 5420 5421 if (flags & SO_CPU) { 5422 int cpu; 5423 5424 for_each_possible_cpu(cpu) { 5425 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 5426 cpu); 5427 int node; 5428 struct slab *slab; 5429 5430 slab = READ_ONCE(c->slab); 5431 if (!slab) 5432 continue; 5433 5434 node = slab_nid(slab); 5435 if (flags & SO_TOTAL) 5436 x = slab->objects; 5437 else if (flags & SO_OBJECTS) 5438 x = slab->inuse; 5439 else 5440 x = 1; 5441 5442 total += x; 5443 nodes[node] += x; 5444 5445 #ifdef CONFIG_SLUB_CPU_PARTIAL 5446 slab = slub_percpu_partial_read_once(c); 5447 if (slab) { 5448 node = slab_nid(slab); 5449 if (flags & SO_TOTAL) 5450 WARN_ON_ONCE(1); 5451 else if (flags & SO_OBJECTS) 5452 WARN_ON_ONCE(1); 5453 else 5454 x = slab->slabs; 5455 total += x; 5456 nodes[node] += x; 5457 } 5458 #endif 5459 } 5460 } 5461 5462 /* 5463 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 5464 * already held which will conflict with an existing lock order: 5465 * 5466 * mem_hotplug_lock->slab_mutex->kernfs_mutex 5467 * 5468 * We don't really need mem_hotplug_lock (to hold off 5469 * slab_mem_going_offline_callback) here because slab's memory hot 5470 * unplug code doesn't destroy the kmem_cache->node[] data. 
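 *
 * For reference, the buffer assembled below has the form
 * "<total> N<node>=<per-node count>...", so reading e.g.
 * /sys/kernel/slab/kmalloc-64/objects on a two-node machine might return
 * "8192 N0=4096 N1=4096" (hypothetical numbers).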
5471 */ 5472 5473 #ifdef CONFIG_SLUB_DEBUG 5474 if (flags & SO_ALL) { 5475 struct kmem_cache_node *n; 5476 5477 for_each_kmem_cache_node(s, node, n) { 5478 5479 if (flags & SO_TOTAL) 5480 x = atomic_long_read(&n->total_objects); 5481 else if (flags & SO_OBJECTS) 5482 x = atomic_long_read(&n->total_objects) - 5483 count_partial(n, count_free); 5484 else 5485 x = atomic_long_read(&n->nr_slabs); 5486 total += x; 5487 nodes[node] += x; 5488 } 5489 5490 } else 5491 #endif 5492 if (flags & SO_PARTIAL) { 5493 struct kmem_cache_node *n; 5494 5495 for_each_kmem_cache_node(s, node, n) { 5496 if (flags & SO_TOTAL) 5497 x = count_partial(n, count_total); 5498 else if (flags & SO_OBJECTS) 5499 x = count_partial(n, count_inuse); 5500 else 5501 x = n->nr_partial; 5502 total += x; 5503 nodes[node] += x; 5504 } 5505 } 5506 5507 len += sysfs_emit_at(buf, len, "%lu", total); 5508 #ifdef CONFIG_NUMA 5509 for (node = 0; node < nr_node_ids; node++) { 5510 if (nodes[node]) 5511 len += sysfs_emit_at(buf, len, " N%d=%lu", 5512 node, nodes[node]); 5513 } 5514 #endif 5515 len += sysfs_emit_at(buf, len, "\n"); 5516 kfree(nodes); 5517 5518 return len; 5519 } 5520 5521 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 5522 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 5523 5524 struct slab_attribute { 5525 struct attribute attr; 5526 ssize_t (*show)(struct kmem_cache *s, char *buf); 5527 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 5528 }; 5529 5530 #define SLAB_ATTR_RO(_name) \ 5531 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 5532 5533 #define SLAB_ATTR(_name) \ 5534 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 5535 5536 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 5537 { 5538 return sysfs_emit(buf, "%u\n", s->size); 5539 } 5540 SLAB_ATTR_RO(slab_size); 5541 5542 static ssize_t align_show(struct kmem_cache *s, char *buf) 5543 { 5544 return sysfs_emit(buf, "%u\n", s->align); 5545 } 5546 SLAB_ATTR_RO(align); 5547 5548 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 5549 { 5550 return sysfs_emit(buf, "%u\n", s->object_size); 5551 } 5552 SLAB_ATTR_RO(object_size); 5553 5554 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 5555 { 5556 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 5557 } 5558 SLAB_ATTR_RO(objs_per_slab); 5559 5560 static ssize_t order_show(struct kmem_cache *s, char *buf) 5561 { 5562 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 5563 } 5564 SLAB_ATTR_RO(order); 5565 5566 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 5567 { 5568 return sysfs_emit(buf, "%lu\n", s->min_partial); 5569 } 5570 5571 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 5572 size_t length) 5573 { 5574 unsigned long min; 5575 int err; 5576 5577 err = kstrtoul(buf, 10, &min); 5578 if (err) 5579 return err; 5580 5581 s->min_partial = min; 5582 return length; 5583 } 5584 SLAB_ATTR(min_partial); 5585 5586 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 5587 { 5588 unsigned int nr_partial = 0; 5589 #ifdef CONFIG_SLUB_CPU_PARTIAL 5590 nr_partial = s->cpu_partial; 5591 #endif 5592 5593 return sysfs_emit(buf, "%u\n", nr_partial); 5594 } 5595 5596 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 5597 size_t length) 5598 { 5599 unsigned int objects; 5600 int err; 5601 5602 err = kstrtouint(buf, 10, &objects); 5603 if (err) 5604 return err; 5605 if (objects && !kmem_cache_has_cpu_partial(s)) 5606 
return -EINVAL; 5607 5608 slub_set_cpu_partial(s, objects); 5609 flush_all(s); 5610 return length; 5611 } 5612 SLAB_ATTR(cpu_partial); 5613 5614 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 5615 { 5616 if (!s->ctor) 5617 return 0; 5618 return sysfs_emit(buf, "%pS\n", s->ctor); 5619 } 5620 SLAB_ATTR_RO(ctor); 5621 5622 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 5623 { 5624 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 5625 } 5626 SLAB_ATTR_RO(aliases); 5627 5628 static ssize_t partial_show(struct kmem_cache *s, char *buf) 5629 { 5630 return show_slab_objects(s, buf, SO_PARTIAL); 5631 } 5632 SLAB_ATTR_RO(partial); 5633 5634 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 5635 { 5636 return show_slab_objects(s, buf, SO_CPU); 5637 } 5638 SLAB_ATTR_RO(cpu_slabs); 5639 5640 static ssize_t objects_show(struct kmem_cache *s, char *buf) 5641 { 5642 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 5643 } 5644 SLAB_ATTR_RO(objects); 5645 5646 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 5647 { 5648 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 5649 } 5650 SLAB_ATTR_RO(objects_partial); 5651 5652 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 5653 { 5654 int objects = 0; 5655 int slabs = 0; 5656 int cpu __maybe_unused; 5657 int len = 0; 5658 5659 #ifdef CONFIG_SLUB_CPU_PARTIAL 5660 for_each_online_cpu(cpu) { 5661 struct slab *slab; 5662 5663 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5664 5665 if (slab) 5666 slabs += slab->slabs; 5667 } 5668 #endif 5669 5670 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 5671 objects = (slabs * oo_objects(s->oo)) / 2; 5672 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 5673 5674 #if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP) 5675 for_each_online_cpu(cpu) { 5676 struct slab *slab; 5677 5678 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5679 if (slab) { 5680 slabs = READ_ONCE(slab->slabs); 5681 objects = (slabs * oo_objects(s->oo)) / 2; 5682 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 5683 cpu, objects, slabs); 5684 } 5685 } 5686 #endif 5687 len += sysfs_emit_at(buf, len, "\n"); 5688 5689 return len; 5690 } 5691 SLAB_ATTR_RO(slabs_cpu_partial); 5692 5693 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 5694 { 5695 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 5696 } 5697 SLAB_ATTR_RO(reclaim_account); 5698 5699 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 5700 { 5701 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 5702 } 5703 SLAB_ATTR_RO(hwcache_align); 5704 5705 #ifdef CONFIG_ZONE_DMA 5706 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 5707 { 5708 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 5709 } 5710 SLAB_ATTR_RO(cache_dma); 5711 #endif 5712 5713 #ifdef CONFIG_HARDENED_USERCOPY 5714 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 5715 { 5716 return sysfs_emit(buf, "%u\n", s->usersize); 5717 } 5718 SLAB_ATTR_RO(usersize); 5719 #endif 5720 5721 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 5722 { 5723 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 5724 } 5725 SLAB_ATTR_RO(destroy_by_rcu); 5726 5727 #ifdef CONFIG_SLUB_DEBUG 5728 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 5729 { 5730 return show_slab_objects(s, buf, SO_ALL); 5731 } 5732 SLAB_ATTR_RO(slabs); 5733 5734 static 
ssize_t total_objects_show(struct kmem_cache *s, char *buf) 5735 { 5736 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 5737 } 5738 SLAB_ATTR_RO(total_objects); 5739 5740 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 5741 { 5742 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 5743 } 5744 SLAB_ATTR_RO(sanity_checks); 5745 5746 static ssize_t trace_show(struct kmem_cache *s, char *buf) 5747 { 5748 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 5749 } 5750 SLAB_ATTR_RO(trace); 5751 5752 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 5753 { 5754 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 5755 } 5756 5757 SLAB_ATTR_RO(red_zone); 5758 5759 static ssize_t poison_show(struct kmem_cache *s, char *buf) 5760 { 5761 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 5762 } 5763 5764 SLAB_ATTR_RO(poison); 5765 5766 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 5767 { 5768 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 5769 } 5770 5771 SLAB_ATTR_RO(store_user); 5772 5773 static ssize_t validate_show(struct kmem_cache *s, char *buf) 5774 { 5775 return 0; 5776 } 5777 5778 static ssize_t validate_store(struct kmem_cache *s, 5779 const char *buf, size_t length) 5780 { 5781 int ret = -EINVAL; 5782 5783 if (buf[0] == '1' && kmem_cache_debug(s)) { 5784 ret = validate_slab_cache(s); 5785 if (ret >= 0) 5786 ret = length; 5787 } 5788 return ret; 5789 } 5790 SLAB_ATTR(validate); 5791 5792 #endif /* CONFIG_SLUB_DEBUG */ 5793 5794 #ifdef CONFIG_FAILSLAB 5795 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 5796 { 5797 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 5798 } 5799 5800 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 5801 size_t length) 5802 { 5803 if (s->refcount > 1) 5804 return -EINVAL; 5805 5806 if (buf[0] == '1') 5807 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); 5808 else 5809 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); 5810 5811 return length; 5812 } 5813 SLAB_ATTR(failslab); 5814 #endif 5815 5816 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 5817 { 5818 return 0; 5819 } 5820 5821 static ssize_t shrink_store(struct kmem_cache *s, 5822 const char *buf, size_t length) 5823 { 5824 if (buf[0] == '1') 5825 kmem_cache_shrink(s); 5826 else 5827 return -EINVAL; 5828 return length; 5829 } 5830 SLAB_ATTR(shrink); 5831 5832 #ifdef CONFIG_NUMA 5833 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 5834 { 5835 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 5836 } 5837 5838 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 5839 const char *buf, size_t length) 5840 { 5841 unsigned int ratio; 5842 int err; 5843 5844 err = kstrtouint(buf, 10, &ratio); 5845 if (err) 5846 return err; 5847 if (ratio > 100) 5848 return -ERANGE; 5849 5850 s->remote_node_defrag_ratio = ratio * 10; 5851 5852 return length; 5853 } 5854 SLAB_ATTR(remote_node_defrag_ratio); 5855 #endif 5856 5857 #ifdef CONFIG_SLUB_STATS 5858 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 5859 { 5860 unsigned long sum = 0; 5861 int cpu; 5862 int len = 0; 5863 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 5864 5865 if (!data) 5866 return -ENOMEM; 5867 5868 for_each_online_cpu(cpu) { 5869 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 5870 5871 data[cpu] = x; 5872 sum += x; 5873 } 5874 5875 len += sysfs_emit_at(buf, len, "%lu", sum); 5876 5877 
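	/*
	 * Example of the resulting line (hypothetical counts): "1523 C0=620
	 * C3=903" on SMP, where only cpus with a non-zero counter are listed
	 * after the global sum. Writing "0" to the same sysfs file clears the
	 * counter on every online cpu via clear_stat() below.
	 */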
#ifdef CONFIG_SMP 5878 for_each_online_cpu(cpu) { 5879 if (data[cpu]) 5880 len += sysfs_emit_at(buf, len, " C%d=%u", 5881 cpu, data[cpu]); 5882 } 5883 #endif 5884 kfree(data); 5885 len += sysfs_emit_at(buf, len, "\n"); 5886 5887 return len; 5888 } 5889 5890 static void clear_stat(struct kmem_cache *s, enum stat_item si) 5891 { 5892 int cpu; 5893 5894 for_each_online_cpu(cpu) 5895 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 5896 } 5897 5898 #define STAT_ATTR(si, text) \ 5899 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 5900 { \ 5901 return show_stat(s, buf, si); \ 5902 } \ 5903 static ssize_t text##_store(struct kmem_cache *s, \ 5904 const char *buf, size_t length) \ 5905 { \ 5906 if (buf[0] != '0') \ 5907 return -EINVAL; \ 5908 clear_stat(s, si); \ 5909 return length; \ 5910 } \ 5911 SLAB_ATTR(text); \ 5912 5913 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 5914 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 5915 STAT_ATTR(FREE_FASTPATH, free_fastpath); 5916 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 5917 STAT_ATTR(FREE_FROZEN, free_frozen); 5918 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 5919 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 5920 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 5921 STAT_ATTR(ALLOC_SLAB, alloc_slab); 5922 STAT_ATTR(ALLOC_REFILL, alloc_refill); 5923 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 5924 STAT_ATTR(FREE_SLAB, free_slab); 5925 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 5926 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 5927 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 5928 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 5929 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 5930 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 5931 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 5932 STAT_ATTR(ORDER_FALLBACK, order_fallback); 5933 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 5934 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5935 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5936 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5937 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 5938 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 5939 #endif /* CONFIG_SLUB_STATS */ 5940 5941 #ifdef CONFIG_KFENCE 5942 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) 5943 { 5944 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); 5945 } 5946 5947 static ssize_t skip_kfence_store(struct kmem_cache *s, 5948 const char *buf, size_t length) 5949 { 5950 int ret = length; 5951 5952 if (buf[0] == '0') 5953 s->flags &= ~SLAB_SKIP_KFENCE; 5954 else if (buf[0] == '1') 5955 s->flags |= SLAB_SKIP_KFENCE; 5956 else 5957 ret = -EINVAL; 5958 5959 return ret; 5960 } 5961 SLAB_ATTR(skip_kfence); 5962 #endif 5963 5964 static struct attribute *slab_attrs[] = { 5965 &slab_size_attr.attr, 5966 &object_size_attr.attr, 5967 &objs_per_slab_attr.attr, 5968 &order_attr.attr, 5969 &min_partial_attr.attr, 5970 &cpu_partial_attr.attr, 5971 &objects_attr.attr, 5972 &objects_partial_attr.attr, 5973 &partial_attr.attr, 5974 &cpu_slabs_attr.attr, 5975 &ctor_attr.attr, 5976 &aliases_attr.attr, 5977 &align_attr.attr, 5978 &hwcache_align_attr.attr, 5979 &reclaim_account_attr.attr, 5980 &destroy_by_rcu_attr.attr, 5981 &shrink_attr.attr, 5982 &slabs_cpu_partial_attr.attr, 5983 #ifdef CONFIG_SLUB_DEBUG 5984 &total_objects_attr.attr, 5985 &slabs_attr.attr, 5986 &sanity_checks_attr.attr, 5987 &trace_attr.attr, 5988 &red_zone_attr.attr, 5989 &poison_attr.attr, 5990 &store_user_attr.attr, 5991 
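 *
 * For illustration (hypothetical caches): a mergeable DMA cache of size 512
 * gets the id ":d-0000512", an accounted one ":A-0000512", and a cache with
 * none of the flags below simply ":0000512".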
        &validate_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
        &cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
        &remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
        &alloc_fastpath_attr.attr,
        &alloc_slowpath_attr.attr,
        &free_fastpath_attr.attr,
        &free_slowpath_attr.attr,
        &free_frozen_attr.attr,
        &free_add_partial_attr.attr,
        &free_remove_partial_attr.attr,
        &alloc_from_partial_attr.attr,
        &alloc_slab_attr.attr,
        &alloc_refill_attr.attr,
        &alloc_node_mismatch_attr.attr,
        &free_slab_attr.attr,
        &cpuslab_flush_attr.attr,
        &deactivate_full_attr.attr,
        &deactivate_empty_attr.attr,
        &deactivate_to_head_attr.attr,
        &deactivate_to_tail_attr.attr,
        &deactivate_remote_frees_attr.attr,
        &deactivate_bypass_attr.attr,
        &order_fallback_attr.attr,
        &cmpxchg_double_fail_attr.attr,
        &cmpxchg_double_cpu_fail_attr.attr,
        &cpu_partial_alloc_attr.attr,
        &cpu_partial_free_attr.attr,
        &cpu_partial_node_attr.attr,
        &cpu_partial_drain_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
        &failslab_attr.attr,
#endif
#ifdef CONFIG_HARDENED_USERCOPY
        &usersize_attr.attr,
#endif
#ifdef CONFIG_KFENCE
        &skip_kfence_attr.attr,
#endif

        NULL
};

static const struct attribute_group slab_attr_group = {
        .attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
                                struct attribute *attr,
                                char *buf)
{
        struct slab_attribute *attribute;
        struct kmem_cache *s;

        attribute = to_slab_attr(attr);
        s = to_slab(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(s, buf);
}

static ssize_t slab_attr_store(struct kobject *kobj,
                                struct attribute *attr,
                                const char *buf, size_t len)
{
        struct slab_attribute *attribute;
        struct kmem_cache *s;

        attribute = to_slab_attr(attr);
        s = to_slab(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(s, buf, len);
}

static void kmem_cache_release(struct kobject *k)
{
        slab_kmem_cache_release(to_slab(k));
}

static const struct sysfs_ops slab_sysfs_ops = {
        .show = slab_attr_show,
        .store = slab_attr_store,
};

static const struct kobj_type slab_ktype = {
        .sysfs_ops = &slab_sysfs_ops,
        .release = kmem_cache_release,
};

static struct kset *slab_kset;

static inline struct kset *cache_kset(struct kmem_cache *s)
{
        return slab_kset;
}

#define ID_STR_LENGTH 32

/* Create a unique string id for a slab cache:
 *
 * Format       :[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
        char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
        char *p = name;

        if (!name)
                return ERR_PTR(-ENOMEM);

        *p++ = ':';
        /*
         * First flags affecting slabcache operations. We will only
         * get here for aliasable slabs so we do not need to support
         * too many flags. The flags here must cover all flags that
         * are matched during merging to guarantee that the id is
         * unique.
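         *
         * Example (added, illustrative only): a mergeable cache with
         * SLAB_CACHE_DMA and SLAB_ACCOUNT set and s->size == 192 would
         * get the id ":dA-0000192" from the checks and format below.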
         */
        if (s->flags & SLAB_CACHE_DMA)
                *p++ = 'd';
        if (s->flags & SLAB_CACHE_DMA32)
                *p++ = 'D';
        if (s->flags & SLAB_RECLAIM_ACCOUNT)
                *p++ = 'a';
        if (s->flags & SLAB_CONSISTENCY_CHECKS)
                *p++ = 'F';
        if (s->flags & SLAB_ACCOUNT)
                *p++ = 'A';
        if (p != name + 1)
                *p++ = '-';
        p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);

        if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
                kfree(name);
                return ERR_PTR(-EINVAL);
        }
        kmsan_unpoison_memory(name, p - name);
        return name;
}

static int sysfs_slab_add(struct kmem_cache *s)
{
        int err;
        const char *name;
        struct kset *kset = cache_kset(s);
        int unmergeable = slab_unmergeable(s);

        if (!unmergeable && disable_higher_order_debug &&
                        (slub_debug & DEBUG_METADATA_FLAGS))
                unmergeable = 1;

        if (unmergeable) {
                /*
                 * Slabcache can never be merged so we can use the name proper.
                 * This is typically the case for debug situations. In that
                 * case we can catch duplicate names easily.
                 */
                sysfs_remove_link(&slab_kset->kobj, s->name);
                name = s->name;
        } else {
                /*
                 * Create a unique name for the slab as a target
                 * for the symlinks.
                 */
                name = create_unique_id(s);
                if (IS_ERR(name))
                        return PTR_ERR(name);
        }

        s->kobj.kset = kset;
        err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
        if (err)
                goto out;

        err = sysfs_create_group(&s->kobj, &slab_attr_group);
        if (err)
                goto out_del_kobj;

        if (!unmergeable) {
                /* Setup first alias */
                sysfs_slab_alias(s, s->name);
        }
out:
        if (!unmergeable)
                kfree(name);
        return err;
out_del_kobj:
        kobject_del(&s->kobj);
        goto out;
}

void sysfs_slab_unlink(struct kmem_cache *s)
{
        if (slab_state >= FULL)
                kobject_del(&s->kobj);
}

void sysfs_slab_release(struct kmem_cache *s)
{
        if (slab_state >= FULL)
                kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
        struct kmem_cache *s;
        const char *name;
        struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
        struct saved_alias *al;

        if (slab_state == FULL) {
                /*
                 * If we have a leftover link then remove it.
                 */
                sysfs_remove_link(&slab_kset->kobj, name);
                return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
        }

        al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
        if (!al)
                return -ENOMEM;

        al->s = s;
        al->name = name;
        al->next = alias_list;
        alias_list = al;
        kmsan_unpoison_memory(al, sizeof(*al));
        return 0;
}

static int __init slab_sysfs_init(void)
{
        struct kmem_cache *s;
        int err;

        mutex_lock(&slab_mutex);

        slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
        if (!slab_kset) {
                mutex_unlock(&slab_mutex);
                pr_err("Cannot register slab subsystem.\n");
                return -ENOSYS;
        }

        slab_state = FULL;

        list_for_each_entry(s, &slab_caches, list) {
                err = sysfs_slab_add(s);
                if (err)
                        pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
                                s->name);
        }

        while (alias_list) {
                struct saved_alias *al = alias_list;

                alias_list = alias_list->next;
                err = sysfs_slab_alias(al->s, al->name);
                if (err)
                        pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
                                al->name);
                kfree(al);
        }

        mutex_unlock(&slab_mutex);
        return 0;
}
late_initcall(slab_sysfs_init);
#endif /* SLAB_SUPPORTS_SYSFS */

#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
static int slab_debugfs_show(struct seq_file *seq, void *v)
{
        struct loc_track *t = seq->private;
        struct location *l;
        unsigned long idx;

        idx = (unsigned long) t->idx;
        if (idx < t->count) {
                l = &t->loc[idx];

                seq_printf(seq, "%7ld ", l->count);

                if (l->addr)
                        seq_printf(seq, "%pS", (void *)l->addr);
                else
                        seq_puts(seq, "<not-available>");

                if (l->waste)
                        seq_printf(seq, " waste=%lu/%lu",
                                l->count * l->waste, l->waste);

                if (l->sum_time != l->min_time) {
                        seq_printf(seq, " age=%ld/%llu/%ld",
                                l->min_time, div_u64(l->sum_time, l->count),
                                l->max_time);
                } else
                        seq_printf(seq, " age=%ld", l->min_time);

                if (l->min_pid != l->max_pid)
                        seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
                else
                        seq_printf(seq, " pid=%ld",
                                l->min_pid);

                if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
                        seq_printf(seq, " cpus=%*pbl",
                                cpumask_pr_args(to_cpumask(l->cpus)));

                if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
                        seq_printf(seq, " nodes=%*pbl",
                                nodemask_pr_args(&l->nodes));

#ifdef CONFIG_STACKDEPOT
                {
                        depot_stack_handle_t handle;
                        unsigned long *entries;
                        unsigned int nr_entries, j;

                        handle = READ_ONCE(l->handle);
                        if (handle) {
                                nr_entries = stack_depot_fetch(handle, &entries);
                                seq_puts(seq, "\n");
                                for (j = 0; j < nr_entries; j++)
                                        seq_printf(seq, " %pS\n", (void *)entries[j]);
                        }
                }
#endif
                seq_puts(seq, "\n");
        }

        if (!idx && !t->count)
                seq_puts(seq, "No data\n");

        return 0;
}

static void slab_debugfs_stop(struct seq_file *seq, void *v)
{
}

static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
{
        struct loc_track *t = seq->private;

        t->idx = ++(*ppos);
        if (*ppos <= t->count)
                return ppos;

        return NULL;
}

static int cmp_loc_by_count(const void *a, const void *b, const void *data)
{
        struct location *loc1 = (struct location *)a;
        struct location *loc2 = (struct location *)b;

        if (loc1->count > loc2->count)
                return -1;
        else
                return 1;
}

static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
{
        struct loc_track *t = seq->private;

        t->idx = *ppos;
        return ppos;
}

static const struct seq_operations slab_debugfs_sops = {
        .start = slab_debugfs_start,
        .next = slab_debugfs_next,
        .stop = slab_debugfs_stop,
        .show = slab_debugfs_show,
};

static int slab_debug_trace_open(struct inode *inode, struct file *filep)
{

        struct kmem_cache_node *n;
        enum track_item alloc;
        int node;
        struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
                                                sizeof(struct loc_track));
        struct kmem_cache *s = file_inode(filep)->i_private;
        unsigned long *obj_map;

        if (!t)
                return -ENOMEM;

        obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
        if (!obj_map) {
                seq_release_private(inode, filep);
                return -ENOMEM;
        }

        if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
                alloc = TRACK_ALLOC;
        else
                alloc = TRACK_FREE;

        if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
                bitmap_free(obj_map);
                seq_release_private(inode, filep);
                return -ENOMEM;
        }

        for_each_kmem_cache_node(s, node, n) {
                unsigned long flags;
                struct slab *slab;

                if (!atomic_long_read(&n->nr_slabs))
                        continue;

                spin_lock_irqsave(&n->list_lock, flags);
                list_for_each_entry(slab, &n->partial, slab_list)
                        process_slab(t, s, slab, alloc, obj_map);
                list_for_each_entry(slab, &n->full, slab_list)
                        process_slab(t, s, slab, alloc, obj_map);
                spin_unlock_irqrestore(&n->list_lock, flags);
        }

        /* Sort locations by count */
        sort_r(t->loc, t->count, sizeof(struct location),
                cmp_loc_by_count, NULL, NULL);

        bitmap_free(obj_map);
        return 0;
}

static int slab_debug_trace_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct loc_track *t = seq->private;

        free_loc_track(t);
        return seq_release_private(inode, file);
}

static const struct file_operations slab_debugfs_fops = {
        .open = slab_debug_trace_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = slab_debug_trace_release,
};

static void debugfs_slab_add(struct kmem_cache *s)
{
        struct dentry *slab_cache_dir;

        if (unlikely(!slab_debugfs_root))
                return;

        slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);

        debugfs_create_file("alloc_traces", 0400,
                slab_cache_dir, s, &slab_debugfs_fops);

        debugfs_create_file("free_traces", 0400,
                slab_cache_dir, s, &slab_debugfs_fops);
}

void debugfs_slab_release(struct kmem_cache *s)
{
        debugfs_lookup_and_remove(s->name, slab_debugfs_root);
}

static int __init slab_debugfs_init(void)
{
        struct kmem_cache *s;

        slab_debugfs_root = debugfs_create_dir("slab", NULL);

        list_for_each_entry(s, &slab_caches, list)
                if (s->flags & SLAB_STORE_USER)
                        debugfs_slab_add(s);

        return 0;

}
__initcall(slab_debugfs_init);
#endif
/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLUB_DEBUG
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
        unsigned long nr_slabs = 0;
        unsigned long nr_objs = 0;
        unsigned long nr_free = 0;
        int node;
        struct kmem_cache_node *n;

        for_each_kmem_cache_node(s, node, n) {
                nr_slabs += node_nr_slabs(n);
                nr_objs += node_nr_objs(n);
                nr_free += count_partial(n, count_free);
        }

        sinfo->active_objs = nr_objs - nr_free;
        sinfo->num_objs = nr_objs;
        sinfo->active_slabs = nr_slabs;
        sinfo->num_slabs = nr_slabs;
        sinfo->objects_per_slab = oo_objects(s->oo);
        sinfo->cache_order = oo_order(s->oo);
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
{
}

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos)
{
        return -EIO;
}
#endif /* CONFIG_SLUB_DEBUG */
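/*
 * Usage note (added, illustrative): with SLAB_SUPPORTS_SYSFS the attributes
 * defined above appear under /sys/kernel/slab/<cache>/ (the "slab" kset is
 * registered under kernel_kobj in slab_sysfs_init()), e.g. reading
 * .../total_objects or writing "1" to .../shrink, which invokes
 * kmem_cache_shrink() via shrink_store(). get_slabinfo() and the stubs above
 * serve the legacy /proc/slabinfo interface instead.
 */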