1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * SLUB: A slab allocator that limits cache line use instead of queuing 4 * objects in per cpu and per node lists. 5 * 6 * The allocator synchronizes using per slab locks or atomic operations 7 * and only uses a centralized lock to manage a pool of partial slabs. 8 * 9 * (C) 2007 SGI, Christoph Lameter 10 * (C) 2011 Linux Foundation, Christoph Lameter 11 */ 12 13 #include <linux/mm.h> 14 #include <linux/swap.h> /* mm_account_reclaimed_pages() */ 15 #include <linux/module.h> 16 #include <linux/bit_spinlock.h> 17 #include <linux/interrupt.h> 18 #include <linux/swab.h> 19 #include <linux/bitops.h> 20 #include <linux/slab.h> 21 #include "slab.h" 22 #include <linux/proc_fs.h> 23 #include <linux/seq_file.h> 24 #include <linux/kasan.h> 25 #include <linux/kmsan.h> 26 #include <linux/cpu.h> 27 #include <linux/cpuset.h> 28 #include <linux/mempolicy.h> 29 #include <linux/ctype.h> 30 #include <linux/stackdepot.h> 31 #include <linux/debugobjects.h> 32 #include <linux/kallsyms.h> 33 #include <linux/kfence.h> 34 #include <linux/memory.h> 35 #include <linux/math64.h> 36 #include <linux/fault-inject.h> 37 #include <linux/kmemleak.h> 38 #include <linux/stacktrace.h> 39 #include <linux/prefetch.h> 40 #include <linux/memcontrol.h> 41 #include <linux/random.h> 42 #include <kunit/test.h> 43 #include <kunit/test-bug.h> 44 #include <linux/sort.h> 45 46 #include <linux/debugfs.h> 47 #include <trace/events/kmem.h> 48 49 #include "internal.h" 50 51 /* 52 * Lock order: 53 * 1. slab_mutex (Global Mutex) 54 * 2. node->list_lock (Spinlock) 55 * 3. kmem_cache->cpu_slab->lock (Local lock) 56 * 4. slab_lock(slab) (Only on some arches) 57 * 5. object_map_lock (Only for debugging) 58 * 59 * slab_mutex 60 * 61 * The role of the slab_mutex is to protect the list of all the slabs 62 * and to synchronize major metadata changes to slab cache structures. 63 * Also synchronizes memory hotplug callbacks. 64 * 65 * slab_lock 66 * 67 * The slab_lock is a wrapper around the page lock, thus it is a bit 68 * spinlock. 69 * 70 * The slab_lock is only used on arches that do not have the ability 71 * to do a cmpxchg_double. It only protects: 72 * 73 * A. slab->freelist -> List of free objects in a slab 74 * B. slab->inuse -> Number of objects in use 75 * C. slab->objects -> Number of objects in slab 76 * D. slab->frozen -> frozen state 77 * 78 * Frozen slabs 79 * 80 * If a slab is frozen then it is exempt from list management. It is 81 * the cpu slab which is actively allocated from by the processor that 82 * froze it and it is not on any list. The processor that froze the 83 * slab is the one who can perform list operations on the slab. Other 84 * processors may put objects onto the freelist but the processor that 85 * froze the slab is the only one that can retrieve the objects from the 86 * slab's freelist. 87 * 88 * CPU partial slabs 89 * 90 * The partially empty slabs cached on the CPU partial list are used 91 * for performance reasons, which speeds up the allocation process. 92 * These slabs are not frozen, but are also exempt from list management, 93 * by clearing the PG_workingset flag when moving out of the node 94 * partial list. Please see __slab_free() for more details. 
95 * 96 * To sum up, the current scheme is: 97 * - node partial slab: PG_Workingset && !frozen 98 * - cpu partial slab: !PG_Workingset && !frozen 99 * - cpu slab: !PG_Workingset && frozen 100 * - full slab: !PG_Workingset && !frozen 101 * 102 * list_lock 103 * 104 * The list_lock protects the partial and full list on each node and 105 * the partial slab counter. If taken then no new slabs may be added or 106 * removed from the lists nor make the number of partial slabs be modified. 107 * (Note that the total number of slabs is an atomic value that may be 108 * modified without taking the list lock). 109 * 110 * The list_lock is a centralized lock and thus we avoid taking it as 111 * much as possible. As long as SLUB does not have to handle partial 112 * slabs, operations can continue without any centralized lock. F.e. 113 * allocating a long series of objects that fill up slabs does not require 114 * the list lock. 115 * 116 * For debug caches, all allocations are forced to go through a list_lock 117 * protected region to serialize against concurrent validation. 118 * 119 * cpu_slab->lock local lock 120 * 121 * This locks protect slowpath manipulation of all kmem_cache_cpu fields 122 * except the stat counters. This is a percpu structure manipulated only by 123 * the local cpu, so the lock protects against being preempted or interrupted 124 * by an irq. Fast path operations rely on lockless operations instead. 125 * 126 * On PREEMPT_RT, the local lock neither disables interrupts nor preemption 127 * which means the lockless fastpath cannot be used as it might interfere with 128 * an in-progress slow path operations. In this case the local lock is always 129 * taken but it still utilizes the freelist for the common operations. 130 * 131 * lockless fastpaths 132 * 133 * The fast path allocation (slab_alloc_node()) and freeing (do_slab_free()) 134 * are fully lockless when satisfied from the percpu slab (and when 135 * cmpxchg_double is possible to use, otherwise slab_lock is taken). 136 * They also don't disable preemption or migration or irqs. They rely on 137 * the transaction id (tid) field to detect being preempted or moved to 138 * another cpu. 139 * 140 * irq, preemption, migration considerations 141 * 142 * Interrupts are disabled as part of list_lock or local_lock operations, or 143 * around the slab_lock operation, in order to make the slab allocator safe 144 * to use in the context of an irq. 145 * 146 * In addition, preemption (or migration on PREEMPT_RT) is disabled in the 147 * allocation slowpath, bulk allocation, and put_cpu_partial(), so that the 148 * local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer 149 * doesn't have to be revalidated in each section protected by the local lock. 150 * 151 * SLUB assigns one slab for allocation to each processor. 152 * Allocations only occur from these slabs called cpu slabs. 153 * 154 * Slabs with free elements are kept on a partial list and during regular 155 * operations no list for full slabs is used. If an object in a full slab is 156 * freed then the slab will show up again on the partial lists. 157 * We track full slabs for debugging purposes though because otherwise we 158 * cannot scan all objects. 159 * 160 * Slabs are freed when they become empty. Teardown and setup is 161 * minimal so we rely on the page allocators per cpu caches for 162 * fast frees and allocs. 163 * 164 * slab->frozen The slab is frozen and exempt from list processing. 
165 * This means that the slab is dedicated to a purpose 166 * such as satisfying allocations for a specific 167 * processor. Objects may be freed in the slab while 168 * it is frozen but slab_free will then skip the usual 169 * list operations. It is up to the processor holding 170 * the slab to integrate the slab into the slab lists 171 * when the slab is no longer needed. 172 * 173 * One use of this flag is to mark slabs that are 174 * used for allocations. Then such a slab becomes a cpu 175 * slab. The cpu slab may be equipped with an additional 176 * freelist that allows lockless access to 177 * free objects in addition to the regular freelist 178 * that requires the slab lock. 179 * 180 * SLAB_DEBUG_FLAGS Slab requires special handling due to debug 181 * options set. This moves slab handling out of 182 * the fast path and disables lockless freelists. 183 */ 184 185 /* 186 * We could simply use migrate_disable()/enable() but as long as it's a 187 * function call even on !PREEMPT_RT, use inline preempt_disable() there. 188 */ 189 #ifndef CONFIG_PREEMPT_RT 190 #define slub_get_cpu_ptr(var) get_cpu_ptr(var) 191 #define slub_put_cpu_ptr(var) put_cpu_ptr(var) 192 #define USE_LOCKLESS_FAST_PATH() (true) 193 #else 194 #define slub_get_cpu_ptr(var) \ 195 ({ \ 196 migrate_disable(); \ 197 this_cpu_ptr(var); \ 198 }) 199 #define slub_put_cpu_ptr(var) \ 200 do { \ 201 (void)(var); \ 202 migrate_enable(); \ 203 } while (0) 204 #define USE_LOCKLESS_FAST_PATH() (false) 205 #endif 206 207 #ifndef CONFIG_SLUB_TINY 208 #define __fastpath_inline __always_inline 209 #else 210 #define __fastpath_inline 211 #endif 212 213 #ifdef CONFIG_SLUB_DEBUG 214 #ifdef CONFIG_SLUB_DEBUG_ON 215 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled); 216 #else 217 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled); 218 #endif 219 #endif /* CONFIG_SLUB_DEBUG */ 220 221 /* Structure holding parameters for get_partial() call chain */ 222 struct partial_context { 223 gfp_t flags; 224 unsigned int orig_size; 225 void *object; 226 }; 227 228 static inline bool kmem_cache_debug(struct kmem_cache *s) 229 { 230 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); 231 } 232 233 static inline bool slub_debug_orig_size(struct kmem_cache *s) 234 { 235 return (kmem_cache_debug_flags(s, SLAB_STORE_USER) && 236 (s->flags & SLAB_KMALLOC)); 237 } 238 239 void *fixup_red_left(struct kmem_cache *s, void *p) 240 { 241 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) 242 p += s->red_left_pad; 243 244 return p; 245 } 246 247 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) 248 { 249 #ifdef CONFIG_SLUB_CPU_PARTIAL 250 return !kmem_cache_debug(s); 251 #else 252 return false; 253 #endif 254 } 255 256 /* 257 * Issues still to be resolved: 258 * 259 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 260 * 261 * - Variable sizing of the per node arrays 262 */ 263 264 /* Enable to log cmpxchg failures */ 265 #undef SLUB_DEBUG_CMPXCHG 266 267 #ifndef CONFIG_SLUB_TINY 268 /* 269 * Minimum number of partial slabs. These will be left on the partial 270 * lists even if they are empty. kmem_cache_shrink may reclaim them. 271 */ 272 #define MIN_PARTIAL 5 273 274 /* 275 * Maximum number of desirable partial slabs. 276 * The existence of more partial slabs makes kmem_cache_shrink 277 * sort the partial list by the number of objects in use. 
278 */ 279 #define MAX_PARTIAL 10 280 #else 281 #define MIN_PARTIAL 0 282 #define MAX_PARTIAL 0 283 #endif 284 285 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \ 286 SLAB_POISON | SLAB_STORE_USER) 287 288 /* 289 * These debug flags cannot use CMPXCHG because there might be consistency 290 * issues when checking or reading debug information 291 */ 292 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \ 293 SLAB_TRACE) 294 295 296 /* 297 * Debugging flags that require metadata to be stored in the slab. These get 298 * disabled when slub_debug=O is used and a cache's min order increases with 299 * metadata. 300 */ 301 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 302 303 #define OO_SHIFT 16 304 #define OO_MASK ((1 << OO_SHIFT) - 1) 305 #define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */ 306 307 /* Internal SLUB flags */ 308 /* Poison object */ 309 #define __OBJECT_POISON ((slab_flags_t __force)0x80000000U) 310 /* Use cmpxchg_double */ 311 312 #ifdef system_has_freelist_aba 313 #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U) 314 #else 315 #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0U) 316 #endif 317 318 /* 319 * Tracking user of a slab. 320 */ 321 #define TRACK_ADDRS_COUNT 16 322 struct track { 323 unsigned long addr; /* Called from address */ 324 #ifdef CONFIG_STACKDEPOT 325 depot_stack_handle_t handle; 326 #endif 327 int cpu; /* Was running on cpu */ 328 int pid; /* Pid context */ 329 unsigned long when; /* When did the operation occur */ 330 }; 331 332 enum track_item { TRACK_ALLOC, TRACK_FREE }; 333 334 #ifdef SLAB_SUPPORTS_SYSFS 335 static int sysfs_slab_add(struct kmem_cache *); 336 static int sysfs_slab_alias(struct kmem_cache *, const char *); 337 #else 338 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 339 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 340 { return 0; } 341 #endif 342 343 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG) 344 static void debugfs_slab_add(struct kmem_cache *); 345 #else 346 static inline void debugfs_slab_add(struct kmem_cache *s) { } 347 #endif 348 349 enum stat_item { 350 ALLOC_FASTPATH, /* Allocation from cpu slab */ 351 ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ 352 FREE_FASTPATH, /* Free to cpu slab */ 353 FREE_SLOWPATH, /* Freeing not to cpu slab */ 354 FREE_FROZEN, /* Freeing to frozen slab */ 355 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ 356 FREE_REMOVE_PARTIAL, /* Freeing removes last object */ 357 ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */ 358 ALLOC_SLAB, /* Cpu slab acquired from page allocator */ 359 ALLOC_REFILL, /* Refill cpu slab from slab freelist */ 360 ALLOC_NODE_MISMATCH, /* Switching cpu slab */ 361 FREE_SLAB, /* Slab freed to the page allocator */ 362 CPUSLAB_FLUSH, /* Abandoning of the cpu slab */ 363 DEACTIVATE_FULL, /* Cpu slab was full when deactivated */ 364 DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */ 365 DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */ 366 DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */ 367 DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */ 368 DEACTIVATE_BYPASS, /* Implicit deactivation */ 369 ORDER_FALLBACK, /* Number of times fallback was necessary */ 370 CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */ 371 CMPXCHG_DOUBLE_FAIL, /* Failures of slab freelist update */ 372 CPU_PARTIAL_ALLOC, /* Used cpu 
partial on alloc */ 373 CPU_PARTIAL_FREE, /* Refill cpu partial on free */ 374 CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */ 375 CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ 376 NR_SLUB_STAT_ITEMS 377 }; 378 379 #ifndef CONFIG_SLUB_TINY 380 /* 381 * When changing the layout, make sure freelist and tid are still compatible 382 * with this_cpu_cmpxchg_double() alignment requirements. 383 */ 384 struct kmem_cache_cpu { 385 union { 386 struct { 387 void **freelist; /* Pointer to next available object */ 388 unsigned long tid; /* Globally unique transaction id */ 389 }; 390 freelist_aba_t freelist_tid; 391 }; 392 struct slab *slab; /* The slab from which we are allocating */ 393 #ifdef CONFIG_SLUB_CPU_PARTIAL 394 struct slab *partial; /* Partially allocated frozen slabs */ 395 #endif 396 local_lock_t lock; /* Protects the fields above */ 397 #ifdef CONFIG_SLUB_STATS 398 unsigned int stat[NR_SLUB_STAT_ITEMS]; 399 #endif 400 }; 401 #endif /* CONFIG_SLUB_TINY */ 402 403 static inline void stat(const struct kmem_cache *s, enum stat_item si) 404 { 405 #ifdef CONFIG_SLUB_STATS 406 /* 407 * The rmw is racy on a preemptible kernel but this is acceptable, so 408 * avoid this_cpu_add()'s irq-disable overhead. 409 */ 410 raw_cpu_inc(s->cpu_slab->stat[si]); 411 #endif 412 } 413 414 static inline 415 void stat_add(const struct kmem_cache *s, enum stat_item si, int v) 416 { 417 #ifdef CONFIG_SLUB_STATS 418 raw_cpu_add(s->cpu_slab->stat[si], v); 419 #endif 420 } 421 422 /* 423 * The slab lists for all objects. 424 */ 425 struct kmem_cache_node { 426 spinlock_t list_lock; 427 unsigned long nr_partial; 428 struct list_head partial; 429 #ifdef CONFIG_SLUB_DEBUG 430 atomic_long_t nr_slabs; 431 atomic_long_t total_objects; 432 struct list_head full; 433 #endif 434 }; 435 436 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 437 { 438 return s->node[node]; 439 } 440 441 /* 442 * Iterator over all nodes. The body will be executed for each node that has 443 * a kmem_cache_node structure allocated (which is true for all online nodes) 444 */ 445 #define for_each_kmem_cache_node(__s, __node, __n) \ 446 for (__node = 0; __node < nr_node_ids; __node++) \ 447 if ((__n = get_node(__s, __node))) 448 449 /* 450 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated. 451 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily 452 * differ during memory hotplug/hotremove operations. 453 * Protected by slab_mutex. 454 */ 455 static nodemask_t slab_nodes; 456 457 #ifndef CONFIG_SLUB_TINY 458 /* 459 * Workqueue used for flush_cpu_slab(). 460 */ 461 static struct workqueue_struct *flushwq; 462 #endif 463 464 /******************************************************************** 465 * Core slab cache functions 466 *******************************************************************/ 467 468 /* 469 * freeptr_t represents a SLUB freelist pointer, which might be encoded 470 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. 471 */ 472 typedef struct { unsigned long v; } freeptr_t; 473 474 /* 475 * Returns freelist pointer (ptr). With hardening, this is obfuscated 476 * with an XOR of the address where the pointer is held and a per-cache 477 * random number. 
478 */ 479 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s, 480 void *ptr, unsigned long ptr_addr) 481 { 482 unsigned long encoded; 483 484 #ifdef CONFIG_SLAB_FREELIST_HARDENED 485 encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr); 486 #else 487 encoded = (unsigned long)ptr; 488 #endif 489 return (freeptr_t){.v = encoded}; 490 } 491 492 static inline void *freelist_ptr_decode(const struct kmem_cache *s, 493 freeptr_t ptr, unsigned long ptr_addr) 494 { 495 void *decoded; 496 497 #ifdef CONFIG_SLAB_FREELIST_HARDENED 498 decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr)); 499 #else 500 decoded = (void *)ptr.v; 501 #endif 502 return decoded; 503 } 504 505 static inline void *get_freepointer(struct kmem_cache *s, void *object) 506 { 507 unsigned long ptr_addr; 508 freeptr_t p; 509 510 object = kasan_reset_tag(object); 511 ptr_addr = (unsigned long)object + s->offset; 512 p = *(freeptr_t *)(ptr_addr); 513 return freelist_ptr_decode(s, p, ptr_addr); 514 } 515 516 #ifndef CONFIG_SLUB_TINY 517 static void prefetch_freepointer(const struct kmem_cache *s, void *object) 518 { 519 prefetchw(object + s->offset); 520 } 521 #endif 522 523 /* 524 * When running under KMSAN, get_freepointer_safe() may return an uninitialized 525 * pointer value in the case the current thread loses the race for the next 526 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in 527 * slab_alloc_node() will fail, so the uninitialized value won't be used, but 528 * KMSAN will still check all arguments of cmpxchg because of imperfect 529 * handling of inline assembly. 530 * To work around this problem, we apply __no_kmsan_checks to ensure that 531 * get_freepointer_safe() returns initialized memory. 532 */ 533 __no_kmsan_checks 534 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) 535 { 536 unsigned long freepointer_addr; 537 freeptr_t p; 538 539 if (!debug_pagealloc_enabled_static()) 540 return get_freepointer(s, object); 541 542 object = kasan_reset_tag(object); 543 freepointer_addr = (unsigned long)object + s->offset; 544 copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p)); 545 return freelist_ptr_decode(s, p, freepointer_addr); 546 } 547 548 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 549 { 550 unsigned long freeptr_addr = (unsigned long)object + s->offset; 551 552 #ifdef CONFIG_SLAB_FREELIST_HARDENED 553 BUG_ON(object == fp); /* naive detection of double free or corruption */ 554 #endif 555 556 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr); 557 *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr); 558 } 559 560 /* Loop over all objects in a slab */ 561 #define for_each_object(__p, __s, __addr, __objects) \ 562 for (__p = fixup_red_left(__s, __addr); \ 563 __p < (__addr) + (__objects) * (__s)->size; \ 564 __p += (__s)->size) 565 566 static inline unsigned int order_objects(unsigned int order, unsigned int size) 567 { 568 return ((unsigned int)PAGE_SIZE << order) / size; 569 } 570 571 static inline struct kmem_cache_order_objects oo_make(unsigned int order, 572 unsigned int size) 573 { 574 struct kmem_cache_order_objects x = { 575 (order << OO_SHIFT) + order_objects(order, size) 576 }; 577 578 return x; 579 } 580 581 static inline unsigned int oo_order(struct kmem_cache_order_objects x) 582 { 583 return x.x >> OO_SHIFT; 584 } 585 586 static inline unsigned int oo_objects(struct kmem_cache_order_objects x) 587 { 588 return x.x & OO_MASK; 589 } 590 591 
#ifdef CONFIG_SLUB_CPU_PARTIAL 592 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) 593 { 594 unsigned int nr_slabs; 595 596 s->cpu_partial = nr_objects; 597 598 /* 599 * We take the number of objects but actually limit the number of 600 * slabs on the per cpu partial list, in order to limit excessive 601 * growth of the list. For simplicity we assume that the slabs will 602 * be half-full. 603 */ 604 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo)); 605 s->cpu_partial_slabs = nr_slabs; 606 } 607 #else 608 static inline void 609 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) 610 { 611 } 612 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 613 614 /* 615 * Per slab locking using the pagelock 616 */ 617 static __always_inline void slab_lock(struct slab *slab) 618 { 619 struct page *page = slab_page(slab); 620 621 VM_BUG_ON_PAGE(PageTail(page), page); 622 bit_spin_lock(PG_locked, &page->flags); 623 } 624 625 static __always_inline void slab_unlock(struct slab *slab) 626 { 627 struct page *page = slab_page(slab); 628 629 VM_BUG_ON_PAGE(PageTail(page), page); 630 bit_spin_unlock(PG_locked, &page->flags); 631 } 632 633 static inline bool 634 __update_freelist_fast(struct slab *slab, 635 void *freelist_old, unsigned long counters_old, 636 void *freelist_new, unsigned long counters_new) 637 { 638 #ifdef system_has_freelist_aba 639 freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old }; 640 freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new }; 641 642 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); 643 #else 644 return false; 645 #endif 646 } 647 648 static inline bool 649 __update_freelist_slow(struct slab *slab, 650 void *freelist_old, unsigned long counters_old, 651 void *freelist_new, unsigned long counters_new) 652 { 653 bool ret = false; 654 655 slab_lock(slab); 656 if (slab->freelist == freelist_old && 657 slab->counters == counters_old) { 658 slab->freelist = freelist_new; 659 slab->counters = counters_new; 660 ret = true; 661 } 662 slab_unlock(slab); 663 664 return ret; 665 } 666 667 /* 668 * Interrupts must be disabled (for the fallback code to work right), typically 669 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is 670 * part of bit_spin_lock(), is sufficient because the policy is not to allow any 671 * allocation/ free operation in hardirq context. Therefore nothing can 672 * interrupt the operation. 
673 */ 674 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab, 675 void *freelist_old, unsigned long counters_old, 676 void *freelist_new, unsigned long counters_new, 677 const char *n) 678 { 679 bool ret; 680 681 if (USE_LOCKLESS_FAST_PATH()) 682 lockdep_assert_irqs_disabled(); 683 684 if (s->flags & __CMPXCHG_DOUBLE) { 685 ret = __update_freelist_fast(slab, freelist_old, counters_old, 686 freelist_new, counters_new); 687 } else { 688 ret = __update_freelist_slow(slab, freelist_old, counters_old, 689 freelist_new, counters_new); 690 } 691 if (likely(ret)) 692 return true; 693 694 cpu_relax(); 695 stat(s, CMPXCHG_DOUBLE_FAIL); 696 697 #ifdef SLUB_DEBUG_CMPXCHG 698 pr_info("%s %s: cmpxchg double redo ", n, s->name); 699 #endif 700 701 return false; 702 } 703 704 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, 705 void *freelist_old, unsigned long counters_old, 706 void *freelist_new, unsigned long counters_new, 707 const char *n) 708 { 709 bool ret; 710 711 if (s->flags & __CMPXCHG_DOUBLE) { 712 ret = __update_freelist_fast(slab, freelist_old, counters_old, 713 freelist_new, counters_new); 714 } else { 715 unsigned long flags; 716 717 local_irq_save(flags); 718 ret = __update_freelist_slow(slab, freelist_old, counters_old, 719 freelist_new, counters_new); 720 local_irq_restore(flags); 721 } 722 if (likely(ret)) 723 return true; 724 725 cpu_relax(); 726 stat(s, CMPXCHG_DOUBLE_FAIL); 727 728 #ifdef SLUB_DEBUG_CMPXCHG 729 pr_info("%s %s: cmpxchg double redo ", n, s->name); 730 #endif 731 732 return false; 733 } 734 735 #ifdef CONFIG_SLUB_DEBUG 736 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; 737 static DEFINE_SPINLOCK(object_map_lock); 738 739 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s, 740 struct slab *slab) 741 { 742 void *addr = slab_address(slab); 743 void *p; 744 745 bitmap_zero(obj_map, slab->objects); 746 747 for (p = slab->freelist; p; p = get_freepointer(s, p)) 748 set_bit(__obj_to_index(s, addr, p), obj_map); 749 } 750 751 #if IS_ENABLED(CONFIG_KUNIT) 752 static bool slab_add_kunit_errors(void) 753 { 754 struct kunit_resource *resource; 755 756 if (!kunit_get_current_test()) 757 return false; 758 759 resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); 760 if (!resource) 761 return false; 762 763 (*(int *)resource->data)++; 764 kunit_put_resource(resource); 765 return true; 766 } 767 #else 768 static inline bool slab_add_kunit_errors(void) { return false; } 769 #endif 770 771 static inline unsigned int size_from_object(struct kmem_cache *s) 772 { 773 if (s->flags & SLAB_RED_ZONE) 774 return s->size - s->red_left_pad; 775 776 return s->size; 777 } 778 779 static inline void *restore_red_left(struct kmem_cache *s, void *p) 780 { 781 if (s->flags & SLAB_RED_ZONE) 782 p -= s->red_left_pad; 783 784 return p; 785 } 786 787 /* 788 * Debug settings: 789 */ 790 #if defined(CONFIG_SLUB_DEBUG_ON) 791 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; 792 #else 793 static slab_flags_t slub_debug; 794 #endif 795 796 static char *slub_debug_string; 797 static int disable_higher_order_debug; 798 799 /* 800 * slub is about to manipulate internal object metadata. This memory lies 801 * outside the range of the allocated object, so accessing it would normally 802 * be reported by kasan as a bounds error. metadata_access_enable() is used 803 * to tell kasan that these accesses are OK. 
804 */ 805 static inline void metadata_access_enable(void) 806 { 807 kasan_disable_current(); 808 } 809 810 static inline void metadata_access_disable(void) 811 { 812 kasan_enable_current(); 813 } 814 815 /* 816 * Object debugging 817 */ 818 819 /* Verify that a pointer has an address that is valid within a slab page */ 820 static inline int check_valid_pointer(struct kmem_cache *s, 821 struct slab *slab, void *object) 822 { 823 void *base; 824 825 if (!object) 826 return 1; 827 828 base = slab_address(slab); 829 object = kasan_reset_tag(object); 830 object = restore_red_left(s, object); 831 if (object < base || object >= base + slab->objects * s->size || 832 (object - base) % s->size) { 833 return 0; 834 } 835 836 return 1; 837 } 838 839 static void print_section(char *level, char *text, u8 *addr, 840 unsigned int length) 841 { 842 metadata_access_enable(); 843 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 844 16, 1, kasan_reset_tag((void *)addr), length, 1); 845 metadata_access_disable(); 846 } 847 848 /* 849 * See comment in calculate_sizes(). 850 */ 851 static inline bool freeptr_outside_object(struct kmem_cache *s) 852 { 853 return s->offset >= s->inuse; 854 } 855 856 /* 857 * Return offset of the end of info block which is inuse + free pointer if 858 * not overlapping with object. 859 */ 860 static inline unsigned int get_info_end(struct kmem_cache *s) 861 { 862 if (freeptr_outside_object(s)) 863 return s->inuse + sizeof(void *); 864 else 865 return s->inuse; 866 } 867 868 static struct track *get_track(struct kmem_cache *s, void *object, 869 enum track_item alloc) 870 { 871 struct track *p; 872 873 p = object + get_info_end(s); 874 875 return kasan_reset_tag(p + alloc); 876 } 877 878 #ifdef CONFIG_STACKDEPOT 879 static noinline depot_stack_handle_t set_track_prepare(void) 880 { 881 depot_stack_handle_t handle; 882 unsigned long entries[TRACK_ADDRS_COUNT]; 883 unsigned int nr_entries; 884 885 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3); 886 handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); 887 888 return handle; 889 } 890 #else 891 static inline depot_stack_handle_t set_track_prepare(void) 892 { 893 return 0; 894 } 895 #endif 896 897 static void set_track_update(struct kmem_cache *s, void *object, 898 enum track_item alloc, unsigned long addr, 899 depot_stack_handle_t handle) 900 { 901 struct track *p = get_track(s, object, alloc); 902 903 #ifdef CONFIG_STACKDEPOT 904 p->handle = handle; 905 #endif 906 p->addr = addr; 907 p->cpu = smp_processor_id(); 908 p->pid = current->pid; 909 p->when = jiffies; 910 } 911 912 static __always_inline void set_track(struct kmem_cache *s, void *object, 913 enum track_item alloc, unsigned long addr) 914 { 915 depot_stack_handle_t handle = set_track_prepare(); 916 917 set_track_update(s, object, alloc, addr, handle); 918 } 919 920 static void init_tracking(struct kmem_cache *s, void *object) 921 { 922 struct track *p; 923 924 if (!(s->flags & SLAB_STORE_USER)) 925 return; 926 927 p = get_track(s, object, TRACK_ALLOC); 928 memset(p, 0, 2*sizeof(struct track)); 929 } 930 931 static void print_track(const char *s, struct track *t, unsigned long pr_time) 932 { 933 depot_stack_handle_t handle __maybe_unused; 934 935 if (!t->addr) 936 return; 937 938 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n", 939 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); 940 #ifdef CONFIG_STACKDEPOT 941 handle = READ_ONCE(t->handle); 942 if (handle) 943 stack_depot_print(handle); 944 else 945 pr_err("object allocation/free stack trace 
missing\n"); 946 #endif 947 } 948 949 void print_tracking(struct kmem_cache *s, void *object) 950 { 951 unsigned long pr_time = jiffies; 952 if (!(s->flags & SLAB_STORE_USER)) 953 return; 954 955 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); 956 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); 957 } 958 959 static void print_slab_info(const struct slab *slab) 960 { 961 struct folio *folio = (struct folio *)slab_folio(slab); 962 963 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n", 964 slab, slab->objects, slab->inuse, slab->freelist, 965 folio_flags(folio, 0)); 966 } 967 968 /* 969 * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API 970 * family will round up the real request size to these fixed ones, so 971 * there could be an extra area than what is requested. Save the original 972 * request size in the meta data area, for better debug and sanity check. 973 */ 974 static inline void set_orig_size(struct kmem_cache *s, 975 void *object, unsigned int orig_size) 976 { 977 void *p = kasan_reset_tag(object); 978 unsigned int kasan_meta_size; 979 980 if (!slub_debug_orig_size(s)) 981 return; 982 983 /* 984 * KASAN can save its free meta data inside of the object at offset 0. 985 * If this meta data size is larger than 'orig_size', it will overlap 986 * the data redzone in [orig_size+1, object_size]. Thus, we adjust 987 * 'orig_size' to be as at least as big as KASAN's meta data. 988 */ 989 kasan_meta_size = kasan_metadata_size(s, true); 990 if (kasan_meta_size > orig_size) 991 orig_size = kasan_meta_size; 992 993 p += get_info_end(s); 994 p += sizeof(struct track) * 2; 995 996 *(unsigned int *)p = orig_size; 997 } 998 999 static inline unsigned int get_orig_size(struct kmem_cache *s, void *object) 1000 { 1001 void *p = kasan_reset_tag(object); 1002 1003 if (!slub_debug_orig_size(s)) 1004 return s->object_size; 1005 1006 p += get_info_end(s); 1007 p += sizeof(struct track) * 2; 1008 1009 return *(unsigned int *)p; 1010 } 1011 1012 void skip_orig_size_check(struct kmem_cache *s, const void *object) 1013 { 1014 set_orig_size(s, (void *)object, s->object_size); 1015 } 1016 1017 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 1018 { 1019 struct va_format vaf; 1020 va_list args; 1021 1022 va_start(args, fmt); 1023 vaf.fmt = fmt; 1024 vaf.va = &args; 1025 pr_err("=============================================================================\n"); 1026 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); 1027 pr_err("-----------------------------------------------------------------------------\n\n"); 1028 va_end(args); 1029 } 1030 1031 __printf(2, 3) 1032 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 
1033 { 1034 struct va_format vaf; 1035 va_list args; 1036 1037 if (slab_add_kunit_errors()) 1038 return; 1039 1040 va_start(args, fmt); 1041 vaf.fmt = fmt; 1042 vaf.va = &args; 1043 pr_err("FIX %s: %pV\n", s->name, &vaf); 1044 va_end(args); 1045 } 1046 1047 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) 1048 { 1049 unsigned int off; /* Offset of last byte */ 1050 u8 *addr = slab_address(slab); 1051 1052 print_tracking(s, p); 1053 1054 print_slab_info(slab); 1055 1056 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n", 1057 p, p - addr, get_freepointer(s, p)); 1058 1059 if (s->flags & SLAB_RED_ZONE) 1060 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, 1061 s->red_left_pad); 1062 else if (p > addr + 16) 1063 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); 1064 1065 print_section(KERN_ERR, "Object ", p, 1066 min_t(unsigned int, s->object_size, PAGE_SIZE)); 1067 if (s->flags & SLAB_RED_ZONE) 1068 print_section(KERN_ERR, "Redzone ", p + s->object_size, 1069 s->inuse - s->object_size); 1070 1071 off = get_info_end(s); 1072 1073 if (s->flags & SLAB_STORE_USER) 1074 off += 2 * sizeof(struct track); 1075 1076 if (slub_debug_orig_size(s)) 1077 off += sizeof(unsigned int); 1078 1079 off += kasan_metadata_size(s, false); 1080 1081 if (off != size_from_object(s)) 1082 /* Beginning of the filler is the free pointer */ 1083 print_section(KERN_ERR, "Padding ", p + off, 1084 size_from_object(s) - off); 1085 1086 dump_stack(); 1087 } 1088 1089 static void object_err(struct kmem_cache *s, struct slab *slab, 1090 u8 *object, char *reason) 1091 { 1092 if (slab_add_kunit_errors()) 1093 return; 1094 1095 slab_bug(s, "%s", reason); 1096 print_trailer(s, slab, object); 1097 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1098 } 1099 1100 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 1101 void **freelist, void *nextfree) 1102 { 1103 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && 1104 !check_valid_pointer(s, slab, nextfree) && freelist) { 1105 object_err(s, slab, *freelist, "Freechain corrupt"); 1106 *freelist = NULL; 1107 slab_fix(s, "Isolate corrupted freechain"); 1108 return true; 1109 } 1110 1111 return false; 1112 } 1113 1114 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab, 1115 const char *fmt, ...) 1116 { 1117 va_list args; 1118 char buf[100]; 1119 1120 if (slab_add_kunit_errors()) 1121 return; 1122 1123 va_start(args, fmt); 1124 vsnprintf(buf, sizeof(buf), fmt, args); 1125 va_end(args); 1126 slab_bug(s, "%s", buf); 1127 print_slab_info(slab); 1128 dump_stack(); 1129 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1130 } 1131 1132 static void init_object(struct kmem_cache *s, void *object, u8 val) 1133 { 1134 u8 *p = kasan_reset_tag(object); 1135 unsigned int poison_size = s->object_size; 1136 1137 if (s->flags & SLAB_RED_ZONE) { 1138 memset(p - s->red_left_pad, val, s->red_left_pad); 1139 1140 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { 1141 /* 1142 * Redzone the extra allocated space by kmalloc than 1143 * requested, and the poison size will be limited to 1144 * the original request size accordingly. 
1145 */ 1146 poison_size = get_orig_size(s, object); 1147 } 1148 } 1149 1150 if (s->flags & __OBJECT_POISON) { 1151 memset(p, POISON_FREE, poison_size - 1); 1152 p[poison_size - 1] = POISON_END; 1153 } 1154 1155 if (s->flags & SLAB_RED_ZONE) 1156 memset(p + poison_size, val, s->inuse - poison_size); 1157 } 1158 1159 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 1160 void *from, void *to) 1161 { 1162 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); 1163 memset(from, data, to - from); 1164 } 1165 1166 static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab, 1167 u8 *object, char *what, 1168 u8 *start, unsigned int value, unsigned int bytes) 1169 { 1170 u8 *fault; 1171 u8 *end; 1172 u8 *addr = slab_address(slab); 1173 1174 metadata_access_enable(); 1175 fault = memchr_inv(kasan_reset_tag(start), value, bytes); 1176 metadata_access_disable(); 1177 if (!fault) 1178 return 1; 1179 1180 end = start + bytes; 1181 while (end > fault && end[-1] == value) 1182 end--; 1183 1184 if (slab_add_kunit_errors()) 1185 goto skip_bug_print; 1186 1187 slab_bug(s, "%s overwritten", what); 1188 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", 1189 fault, end - 1, fault - addr, 1190 fault[0], value); 1191 print_trailer(s, slab, object); 1192 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1193 1194 skip_bug_print: 1195 restore_bytes(s, what, value, fault, end); 1196 return 0; 1197 } 1198 1199 /* 1200 * Object layout: 1201 * 1202 * object address 1203 * Bytes of the object to be managed. 1204 * If the freepointer may overlay the object then the free 1205 * pointer is at the middle of the object. 1206 * 1207 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 1208 * 0xa5 (POISON_END) 1209 * 1210 * object + s->object_size 1211 * Padding to reach word boundary. This is also used for Redzoning. 1212 * Padding is extended by another word if Redzoning is enabled and 1213 * object_size == inuse. 1214 * 1215 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 1216 * 0xcc (RED_ACTIVE) for objects in use. 1217 * 1218 * object + s->inuse 1219 * Meta data starts here. 1220 * 1221 * A. Free pointer (if we cannot overwrite object on free) 1222 * B. Tracking data for SLAB_STORE_USER 1223 * C. Original request size for kmalloc object (SLAB_STORE_USER enabled) 1224 * D. Padding to reach required alignment boundary or at minimum 1225 * one word if debugging is on to be able to detect writes 1226 * before the word boundary. 1227 * 1228 * Padding is done using 0x5a (POISON_INUSE) 1229 * 1230 * object + s->size 1231 * Nothing is used beyond s->size. 1232 * 1233 * If slabcaches are merged then the object_size and inuse boundaries are mostly 1234 * ignored. And therefore no slab options that rely on these boundaries 1235 * may be used with merged slabcaches. 
1236 */ 1237 1238 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) 1239 { 1240 unsigned long off = get_info_end(s); /* The end of info */ 1241 1242 if (s->flags & SLAB_STORE_USER) { 1243 /* We also have user information there */ 1244 off += 2 * sizeof(struct track); 1245 1246 if (s->flags & SLAB_KMALLOC) 1247 off += sizeof(unsigned int); 1248 } 1249 1250 off += kasan_metadata_size(s, false); 1251 1252 if (size_from_object(s) == off) 1253 return 1; 1254 1255 return check_bytes_and_report(s, slab, p, "Object padding", 1256 p + off, POISON_INUSE, size_from_object(s) - off); 1257 } 1258 1259 /* Check the pad bytes at the end of a slab page */ 1260 static void slab_pad_check(struct kmem_cache *s, struct slab *slab) 1261 { 1262 u8 *start; 1263 u8 *fault; 1264 u8 *end; 1265 u8 *pad; 1266 int length; 1267 int remainder; 1268 1269 if (!(s->flags & SLAB_POISON)) 1270 return; 1271 1272 start = slab_address(slab); 1273 length = slab_size(slab); 1274 end = start + length; 1275 remainder = length % s->size; 1276 if (!remainder) 1277 return; 1278 1279 pad = end - remainder; 1280 metadata_access_enable(); 1281 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder); 1282 metadata_access_disable(); 1283 if (!fault) 1284 return; 1285 while (end > fault && end[-1] == POISON_INUSE) 1286 end--; 1287 1288 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu", 1289 fault, end - 1, fault - start); 1290 print_section(KERN_ERR, "Padding ", pad, remainder); 1291 1292 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); 1293 } 1294 1295 static int check_object(struct kmem_cache *s, struct slab *slab, 1296 void *object, u8 val) 1297 { 1298 u8 *p = object; 1299 u8 *endobject = object + s->object_size; 1300 unsigned int orig_size, kasan_meta_size; 1301 1302 if (s->flags & SLAB_RED_ZONE) { 1303 if (!check_bytes_and_report(s, slab, object, "Left Redzone", 1304 object - s->red_left_pad, val, s->red_left_pad)) 1305 return 0; 1306 1307 if (!check_bytes_and_report(s, slab, object, "Right Redzone", 1308 endobject, val, s->inuse - s->object_size)) 1309 return 0; 1310 1311 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { 1312 orig_size = get_orig_size(s, object); 1313 1314 if (s->object_size > orig_size && 1315 !check_bytes_and_report(s, slab, object, 1316 "kmalloc Redzone", p + orig_size, 1317 val, s->object_size - orig_size)) { 1318 return 0; 1319 } 1320 } 1321 } else { 1322 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 1323 check_bytes_and_report(s, slab, p, "Alignment padding", 1324 endobject, POISON_INUSE, 1325 s->inuse - s->object_size); 1326 } 1327 } 1328 1329 if (s->flags & SLAB_POISON) { 1330 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) { 1331 /* 1332 * KASAN can save its free meta data inside of the 1333 * object at offset 0. Thus, skip checking the part of 1334 * the redzone that overlaps with the meta data. 1335 */ 1336 kasan_meta_size = kasan_metadata_size(s, true); 1337 if (kasan_meta_size < s->object_size - 1 && 1338 !check_bytes_and_report(s, slab, p, "Poison", 1339 p + kasan_meta_size, POISON_FREE, 1340 s->object_size - kasan_meta_size - 1)) 1341 return 0; 1342 if (kasan_meta_size < s->object_size && 1343 !check_bytes_and_report(s, slab, p, "End Poison", 1344 p + s->object_size - 1, POISON_END, 1)) 1345 return 0; 1346 } 1347 /* 1348 * check_pad_bytes cleans up on its own. 
1349 */ 1350 check_pad_bytes(s, slab, p); 1351 } 1352 1353 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) 1354 /* 1355 * Object and freepointer overlap. Cannot check 1356 * freepointer while object is allocated. 1357 */ 1358 return 1; 1359 1360 /* Check free pointer validity */ 1361 if (!check_valid_pointer(s, slab, get_freepointer(s, p))) { 1362 object_err(s, slab, p, "Freepointer corrupt"); 1363 /* 1364 * No choice but to zap it and thus lose the remainder 1365 * of the free objects in this slab. May cause 1366 * another error because the object count is now wrong. 1367 */ 1368 set_freepointer(s, p, NULL); 1369 return 0; 1370 } 1371 return 1; 1372 } 1373 1374 static int check_slab(struct kmem_cache *s, struct slab *slab) 1375 { 1376 int maxobj; 1377 1378 if (!folio_test_slab(slab_folio(slab))) { 1379 slab_err(s, slab, "Not a valid slab page"); 1380 return 0; 1381 } 1382 1383 maxobj = order_objects(slab_order(slab), s->size); 1384 if (slab->objects > maxobj) { 1385 slab_err(s, slab, "objects %u > max %u", 1386 slab->objects, maxobj); 1387 return 0; 1388 } 1389 if (slab->inuse > slab->objects) { 1390 slab_err(s, slab, "inuse %u > max %u", 1391 slab->inuse, slab->objects); 1392 return 0; 1393 } 1394 /* Slab_pad_check fixes things up after itself */ 1395 slab_pad_check(s, slab); 1396 return 1; 1397 } 1398 1399 /* 1400 * Determine if a certain object in a slab is on the freelist. Must hold the 1401 * slab lock to guarantee that the chains are in a consistent state. 1402 */ 1403 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search) 1404 { 1405 int nr = 0; 1406 void *fp; 1407 void *object = NULL; 1408 int max_objects; 1409 1410 fp = slab->freelist; 1411 while (fp && nr <= slab->objects) { 1412 if (fp == search) 1413 return 1; 1414 if (!check_valid_pointer(s, slab, fp)) { 1415 if (object) { 1416 object_err(s, slab, object, 1417 "Freechain corrupt"); 1418 set_freepointer(s, object, NULL); 1419 } else { 1420 slab_err(s, slab, "Freepointer corrupt"); 1421 slab->freelist = NULL; 1422 slab->inuse = slab->objects; 1423 slab_fix(s, "Freelist cleared"); 1424 return 0; 1425 } 1426 break; 1427 } 1428 object = fp; 1429 fp = get_freepointer(s, object); 1430 nr++; 1431 } 1432 1433 max_objects = order_objects(slab_order(slab), s->size); 1434 if (max_objects > MAX_OBJS_PER_PAGE) 1435 max_objects = MAX_OBJS_PER_PAGE; 1436 1437 if (slab->objects != max_objects) { 1438 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", 1439 slab->objects, max_objects); 1440 slab->objects = max_objects; 1441 slab_fix(s, "Number of objects adjusted"); 1442 } 1443 if (slab->inuse != slab->objects - nr) { 1444 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", 1445 slab->inuse, slab->objects - nr); 1446 slab->inuse = slab->objects - nr; 1447 slab_fix(s, "Object count adjusted"); 1448 } 1449 return search == NULL; 1450 } 1451 1452 static void trace(struct kmem_cache *s, struct slab *slab, void *object, 1453 int alloc) 1454 { 1455 if (s->flags & SLAB_TRACE) { 1456 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 1457 s->name, 1458 alloc ? "alloc" : "free", 1459 object, slab->inuse, 1460 slab->freelist); 1461 1462 if (!alloc) 1463 print_section(KERN_INFO, "Object ", (void *)object, 1464 s->object_size); 1465 1466 dump_stack(); 1467 } 1468 } 1469 1470 /* 1471 * Tracking of fully allocated slabs for debugging purposes. 
1472 */ 1473 static void add_full(struct kmem_cache *s, 1474 struct kmem_cache_node *n, struct slab *slab) 1475 { 1476 if (!(s->flags & SLAB_STORE_USER)) 1477 return; 1478 1479 lockdep_assert_held(&n->list_lock); 1480 list_add(&slab->slab_list, &n->full); 1481 } 1482 1483 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab) 1484 { 1485 if (!(s->flags & SLAB_STORE_USER)) 1486 return; 1487 1488 lockdep_assert_held(&n->list_lock); 1489 list_del(&slab->slab_list); 1490 } 1491 1492 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1493 { 1494 return atomic_long_read(&n->nr_slabs); 1495 } 1496 1497 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 1498 { 1499 struct kmem_cache_node *n = get_node(s, node); 1500 1501 /* 1502 * May be called early in order to allocate a slab for the 1503 * kmem_cache_node structure. Solve the chicken-egg 1504 * dilemma by deferring the increment of the count during 1505 * bootstrap (see early_kmem_cache_node_alloc). 1506 */ 1507 if (likely(n)) { 1508 atomic_long_inc(&n->nr_slabs); 1509 atomic_long_add(objects, &n->total_objects); 1510 } 1511 } 1512 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 1513 { 1514 struct kmem_cache_node *n = get_node(s, node); 1515 1516 atomic_long_dec(&n->nr_slabs); 1517 atomic_long_sub(objects, &n->total_objects); 1518 } 1519 1520 /* Object debug checks for alloc/free paths */ 1521 static void setup_object_debug(struct kmem_cache *s, void *object) 1522 { 1523 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) 1524 return; 1525 1526 init_object(s, object, SLUB_RED_INACTIVE); 1527 init_tracking(s, object); 1528 } 1529 1530 static 1531 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) 1532 { 1533 if (!kmem_cache_debug_flags(s, SLAB_POISON)) 1534 return; 1535 1536 metadata_access_enable(); 1537 memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab)); 1538 metadata_access_disable(); 1539 } 1540 1541 static inline int alloc_consistency_checks(struct kmem_cache *s, 1542 struct slab *slab, void *object) 1543 { 1544 if (!check_slab(s, slab)) 1545 return 0; 1546 1547 if (!check_valid_pointer(s, slab, object)) { 1548 object_err(s, slab, object, "Freelist Pointer check fails"); 1549 return 0; 1550 } 1551 1552 if (!check_object(s, slab, object, SLUB_RED_INACTIVE)) 1553 return 0; 1554 1555 return 1; 1556 } 1557 1558 static noinline bool alloc_debug_processing(struct kmem_cache *s, 1559 struct slab *slab, void *object, int orig_size) 1560 { 1561 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1562 if (!alloc_consistency_checks(s, slab, object)) 1563 goto bad; 1564 } 1565 1566 /* Success. Perform special debug activities for allocs */ 1567 trace(s, slab, object, 1); 1568 set_orig_size(s, object, orig_size); 1569 init_object(s, object, SLUB_RED_ACTIVE); 1570 return true; 1571 1572 bad: 1573 if (folio_test_slab(slab_folio(slab))) { 1574 /* 1575 * If this is a slab page then lets do the best we can 1576 * to avoid issues in the future. Marking all objects 1577 * as used avoids touching the remaining objects. 
1578 */ 1579 slab_fix(s, "Marking all objects used"); 1580 slab->inuse = slab->objects; 1581 slab->freelist = NULL; 1582 } 1583 return false; 1584 } 1585 1586 static inline int free_consistency_checks(struct kmem_cache *s, 1587 struct slab *slab, void *object, unsigned long addr) 1588 { 1589 if (!check_valid_pointer(s, slab, object)) { 1590 slab_err(s, slab, "Invalid object pointer 0x%p", object); 1591 return 0; 1592 } 1593 1594 if (on_freelist(s, slab, object)) { 1595 object_err(s, slab, object, "Object already free"); 1596 return 0; 1597 } 1598 1599 if (!check_object(s, slab, object, SLUB_RED_ACTIVE)) 1600 return 0; 1601 1602 if (unlikely(s != slab->slab_cache)) { 1603 if (!folio_test_slab(slab_folio(slab))) { 1604 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", 1605 object); 1606 } else if (!slab->slab_cache) { 1607 pr_err("SLUB <none>: no slab for object 0x%p.\n", 1608 object); 1609 dump_stack(); 1610 } else 1611 object_err(s, slab, object, 1612 "page slab pointer corrupt."); 1613 return 0; 1614 } 1615 return 1; 1616 } 1617 1618 /* 1619 * Parse a block of slub_debug options. Blocks are delimited by ';' 1620 * 1621 * @str: start of block 1622 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified 1623 * @slabs: return start of list of slabs, or NULL when there's no list 1624 * @init: assume this is initial parsing and not per-kmem-create parsing 1625 * 1626 * returns the start of next block if there's any, or NULL 1627 */ 1628 static char * 1629 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init) 1630 { 1631 bool higher_order_disable = false; 1632 1633 /* Skip any completely empty blocks */ 1634 while (*str && *str == ';') 1635 str++; 1636 1637 if (*str == ',') { 1638 /* 1639 * No options but restriction on slabs. This means full 1640 * debugging for slabs matching a pattern. 1641 */ 1642 *flags = DEBUG_DEFAULT_FLAGS; 1643 goto check_slabs; 1644 } 1645 *flags = 0; 1646 1647 /* Determine which debug features should be switched on */ 1648 for (; *str && *str != ',' && *str != ';'; str++) { 1649 switch (tolower(*str)) { 1650 case '-': 1651 *flags = 0; 1652 break; 1653 case 'f': 1654 *flags |= SLAB_CONSISTENCY_CHECKS; 1655 break; 1656 case 'z': 1657 *flags |= SLAB_RED_ZONE; 1658 break; 1659 case 'p': 1660 *flags |= SLAB_POISON; 1661 break; 1662 case 'u': 1663 *flags |= SLAB_STORE_USER; 1664 break; 1665 case 't': 1666 *flags |= SLAB_TRACE; 1667 break; 1668 case 'a': 1669 *flags |= SLAB_FAILSLAB; 1670 break; 1671 case 'o': 1672 /* 1673 * Avoid enabling debugging on caches if its minimum 1674 * order would increase as a result. 1675 */ 1676 higher_order_disable = true; 1677 break; 1678 default: 1679 if (init) 1680 pr_err("slub_debug option '%c' unknown. 
skipped\n", *str); 1681 } 1682 } 1683 check_slabs: 1684 if (*str == ',') 1685 *slabs = ++str; 1686 else 1687 *slabs = NULL; 1688 1689 /* Skip over the slab list */ 1690 while (*str && *str != ';') 1691 str++; 1692 1693 /* Skip any completely empty blocks */ 1694 while (*str && *str == ';') 1695 str++; 1696 1697 if (init && higher_order_disable) 1698 disable_higher_order_debug = 1; 1699 1700 if (*str) 1701 return str; 1702 else 1703 return NULL; 1704 } 1705 1706 static int __init setup_slub_debug(char *str) 1707 { 1708 slab_flags_t flags; 1709 slab_flags_t global_flags; 1710 char *saved_str; 1711 char *slab_list; 1712 bool global_slub_debug_changed = false; 1713 bool slab_list_specified = false; 1714 1715 global_flags = DEBUG_DEFAULT_FLAGS; 1716 if (*str++ != '=' || !*str) 1717 /* 1718 * No options specified. Switch on full debugging. 1719 */ 1720 goto out; 1721 1722 saved_str = str; 1723 while (str) { 1724 str = parse_slub_debug_flags(str, &flags, &slab_list, true); 1725 1726 if (!slab_list) { 1727 global_flags = flags; 1728 global_slub_debug_changed = true; 1729 } else { 1730 slab_list_specified = true; 1731 if (flags & SLAB_STORE_USER) 1732 stack_depot_request_early_init(); 1733 } 1734 } 1735 1736 /* 1737 * For backwards compatibility, a single list of flags with list of 1738 * slabs means debugging is only changed for those slabs, so the global 1739 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending 1740 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as 1741 * long as there is no option specifying flags without a slab list. 1742 */ 1743 if (slab_list_specified) { 1744 if (!global_slub_debug_changed) 1745 global_flags = slub_debug; 1746 slub_debug_string = saved_str; 1747 } 1748 out: 1749 slub_debug = global_flags; 1750 if (slub_debug & SLAB_STORE_USER) 1751 stack_depot_request_early_init(); 1752 if (slub_debug != 0 || slub_debug_string) 1753 static_branch_enable(&slub_debug_enabled); 1754 else 1755 static_branch_disable(&slub_debug_enabled); 1756 if ((static_branch_unlikely(&init_on_alloc) || 1757 static_branch_unlikely(&init_on_free)) && 1758 (slub_debug & SLAB_POISON)) 1759 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); 1760 return 1; 1761 } 1762 1763 __setup("slub_debug", setup_slub_debug); 1764 1765 /* 1766 * kmem_cache_flags - apply debugging options to the cache 1767 * @object_size: the size of an object without meta data 1768 * @flags: flags to set 1769 * @name: name of the cache 1770 * 1771 * Debug option(s) are applied to @flags. In addition to the debug 1772 * option(s), if a slab name (or multiple) is specified i.e. 1773 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ... 1774 * then only the select slabs will receive the debug option(s). 1775 */ 1776 slab_flags_t kmem_cache_flags(unsigned int object_size, 1777 slab_flags_t flags, const char *name) 1778 { 1779 char *iter; 1780 size_t len; 1781 char *next_block; 1782 slab_flags_t block_flags; 1783 slab_flags_t slub_debug_local = slub_debug; 1784 1785 if (flags & SLAB_NO_USER_FLAGS) 1786 return flags; 1787 1788 /* 1789 * If the slab cache is for debugging (e.g. kmemleak) then 1790 * don't store user (stack trace) information by default, 1791 * but let the user enable it via the command line below. 
1792 */ 1793 if (flags & SLAB_NOLEAKTRACE) 1794 slub_debug_local &= ~SLAB_STORE_USER; 1795 1796 len = strlen(name); 1797 next_block = slub_debug_string; 1798 /* Go through all blocks of debug options, see if any matches our slab's name */ 1799 while (next_block) { 1800 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); 1801 if (!iter) 1802 continue; 1803 /* Found a block that has a slab list, search it */ 1804 while (*iter) { 1805 char *end, *glob; 1806 size_t cmplen; 1807 1808 end = strchrnul(iter, ','); 1809 if (next_block && next_block < end) 1810 end = next_block - 1; 1811 1812 glob = strnchr(iter, end - iter, '*'); 1813 if (glob) 1814 cmplen = glob - iter; 1815 else 1816 cmplen = max_t(size_t, len, (end - iter)); 1817 1818 if (!strncmp(name, iter, cmplen)) { 1819 flags |= block_flags; 1820 return flags; 1821 } 1822 1823 if (!*end || *end == ';') 1824 break; 1825 iter = end + 1; 1826 } 1827 } 1828 1829 return flags | slub_debug_local; 1830 } 1831 #else /* !CONFIG_SLUB_DEBUG */ 1832 static inline void setup_object_debug(struct kmem_cache *s, void *object) {} 1833 static inline 1834 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} 1835 1836 static inline bool alloc_debug_processing(struct kmem_cache *s, 1837 struct slab *slab, void *object, int orig_size) { return true; } 1838 1839 static inline bool free_debug_processing(struct kmem_cache *s, 1840 struct slab *slab, void *head, void *tail, int *bulk_cnt, 1841 unsigned long addr, depot_stack_handle_t handle) { return true; } 1842 1843 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} 1844 static inline int check_object(struct kmem_cache *s, struct slab *slab, 1845 void *object, u8 val) { return 1; } 1846 static inline depot_stack_handle_t set_track_prepare(void) { return 0; } 1847 static inline void set_track(struct kmem_cache *s, void *object, 1848 enum track_item alloc, unsigned long addr) {} 1849 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1850 struct slab *slab) {} 1851 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 1852 struct slab *slab) {} 1853 slab_flags_t kmem_cache_flags(unsigned int object_size, 1854 slab_flags_t flags, const char *name) 1855 { 1856 return flags; 1857 } 1858 #define slub_debug 0 1859 1860 #define disable_higher_order_debug 0 1861 1862 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1863 { return 0; } 1864 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1865 int objects) {} 1866 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1867 int objects) {} 1868 1869 #ifndef CONFIG_SLUB_TINY 1870 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 1871 void **freelist, void *nextfree) 1872 { 1873 return false; 1874 } 1875 #endif 1876 #endif /* CONFIG_SLUB_DEBUG */ 1877 1878 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s) 1879 { 1880 return (s->flags & SLAB_RECLAIM_ACCOUNT) ? 1881 NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B; 1882 } 1883 1884 #ifdef CONFIG_MEMCG_KMEM 1885 static inline void memcg_free_slab_cgroups(struct slab *slab) 1886 { 1887 kfree(slab_objcgs(slab)); 1888 slab->memcg_data = 0; 1889 } 1890 1891 static inline size_t obj_full_size(struct kmem_cache *s) 1892 { 1893 /* 1894 * For each accounted object there is an extra space which is used 1895 * to store obj_cgroup membership. Charge it too. 
1896 */ 1897 return s->size + sizeof(struct obj_cgroup *); 1898 } 1899 1900 /* 1901 * Returns false if the allocation should fail. 1902 */ 1903 static bool __memcg_slab_pre_alloc_hook(struct kmem_cache *s, 1904 struct list_lru *lru, 1905 struct obj_cgroup **objcgp, 1906 size_t objects, gfp_t flags) 1907 { 1908 /* 1909 * The obtained objcg pointer is safe to use within the current scope, 1910 * defined by current task or set_active_memcg() pair. 1911 * obj_cgroup_get() is used to get a permanent reference. 1912 */ 1913 struct obj_cgroup *objcg = current_obj_cgroup(); 1914 if (!objcg) 1915 return true; 1916 1917 if (lru) { 1918 int ret; 1919 struct mem_cgroup *memcg; 1920 1921 memcg = get_mem_cgroup_from_objcg(objcg); 1922 ret = memcg_list_lru_alloc(memcg, lru, flags); 1923 css_put(&memcg->css); 1924 1925 if (ret) 1926 return false; 1927 } 1928 1929 if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) 1930 return false; 1931 1932 *objcgp = objcg; 1933 return true; 1934 } 1935 1936 /* 1937 * Returns false if the allocation should fail. 1938 */ 1939 static __fastpath_inline 1940 bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 1941 struct obj_cgroup **objcgp, size_t objects, 1942 gfp_t flags) 1943 { 1944 if (!memcg_kmem_online()) 1945 return true; 1946 1947 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) 1948 return true; 1949 1950 return likely(__memcg_slab_pre_alloc_hook(s, lru, objcgp, objects, 1951 flags)); 1952 } 1953 1954 static void __memcg_slab_post_alloc_hook(struct kmem_cache *s, 1955 struct obj_cgroup *objcg, 1956 gfp_t flags, size_t size, 1957 void **p) 1958 { 1959 struct slab *slab; 1960 unsigned long off; 1961 size_t i; 1962 1963 flags &= gfp_allowed_mask; 1964 1965 for (i = 0; i < size; i++) { 1966 if (likely(p[i])) { 1967 slab = virt_to_slab(p[i]); 1968 1969 if (!slab_objcgs(slab) && 1970 memcg_alloc_slab_cgroups(slab, s, flags, false)) { 1971 obj_cgroup_uncharge(objcg, obj_full_size(s)); 1972 continue; 1973 } 1974 1975 off = obj_to_index(s, slab, p[i]); 1976 obj_cgroup_get(objcg); 1977 slab_objcgs(slab)[off] = objcg; 1978 mod_objcg_state(objcg, slab_pgdat(slab), 1979 cache_vmstat_idx(s), obj_full_size(s)); 1980 } else { 1981 obj_cgroup_uncharge(objcg, obj_full_size(s)); 1982 } 1983 } 1984 } 1985 1986 static __fastpath_inline 1987 void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, 1988 gfp_t flags, size_t size, void **p) 1989 { 1990 if (likely(!memcg_kmem_online() || !objcg)) 1991 return; 1992 1993 return __memcg_slab_post_alloc_hook(s, objcg, flags, size, p); 1994 } 1995 1996 static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 1997 void **p, int objects, 1998 struct obj_cgroup **objcgs) 1999 { 2000 for (int i = 0; i < objects; i++) { 2001 struct obj_cgroup *objcg; 2002 unsigned int off; 2003 2004 off = obj_to_index(s, slab, p[i]); 2005 objcg = objcgs[off]; 2006 if (!objcg) 2007 continue; 2008 2009 objcgs[off] = NULL; 2010 obj_cgroup_uncharge(objcg, obj_full_size(s)); 2011 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s), 2012 -obj_full_size(s)); 2013 obj_cgroup_put(objcg); 2014 } 2015 } 2016 2017 static __fastpath_inline 2018 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2019 int objects) 2020 { 2021 struct obj_cgroup **objcgs; 2022 2023 if (!memcg_kmem_online()) 2024 return; 2025 2026 objcgs = slab_objcgs(slab); 2027 if (likely(!objcgs)) 2028 return; 2029 2030 __memcg_slab_free_hook(s, slab, p, objects, objcgs); 2031 } 2032 
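/*
 * Rough sketch of the accounting lifecycle for one accounted object, using
 * the hooks defined above and below (a summary, not additional behaviour):
 *
 *   memcg_slab_pre_alloc_hook()    charges objects * obj_full_size(s)
 *   memcg_slab_post_alloc_hook()   records the objcg in slab_objcgs(slab)
 *   memcg_slab_free_hook()         uncharges and drops the objcg reference
 *   memcg_slab_alloc_error_hook()  undoes the pre-charge if the allocation
 *                                  itself failed
 */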
2033 static inline 2034 void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects, 2035 struct obj_cgroup *objcg) 2036 { 2037 if (objcg) 2038 obj_cgroup_uncharge(objcg, objects * obj_full_size(s)); 2039 } 2040 #else /* CONFIG_MEMCG_KMEM */ 2041 static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr) 2042 { 2043 return NULL; 2044 } 2045 2046 static inline void memcg_free_slab_cgroups(struct slab *slab) 2047 { 2048 } 2049 2050 static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, 2051 struct list_lru *lru, 2052 struct obj_cgroup **objcgp, 2053 size_t objects, gfp_t flags) 2054 { 2055 return true; 2056 } 2057 2058 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, 2059 struct obj_cgroup *objcg, 2060 gfp_t flags, size_t size, 2061 void **p) 2062 { 2063 } 2064 2065 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 2066 void **p, int objects) 2067 { 2068 } 2069 2070 static inline 2071 void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects, 2072 struct obj_cgroup *objcg) 2073 { 2074 } 2075 #endif /* CONFIG_MEMCG_KMEM */ 2076 2077 /* 2078 * Hooks for other subsystems that check memory allocations. In a typical 2079 * production configuration these hooks all should produce no code at all. 2080 * 2081 * Returns true if freeing of the object can proceed, false if its reuse 2082 * was delayed by KASAN quarantine, or it was returned to KFENCE. 2083 */ 2084 static __always_inline 2085 bool slab_free_hook(struct kmem_cache *s, void *x, bool init) 2086 { 2087 kmemleak_free_recursive(x, s->flags); 2088 kmsan_slab_free(s, x); 2089 2090 debug_check_no_locks_freed(x, s->object_size); 2091 2092 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 2093 debug_check_no_obj_freed(x, s->object_size); 2094 2095 /* Use KCSAN to help debug racy use-after-free. */ 2096 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) 2097 __kcsan_check_access(x, s->object_size, 2098 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 2099 2100 if (kfence_free(x)) 2101 return false; 2102 2103 /* 2104 * As memory initialization might be integrated into KASAN, 2105 * kasan_slab_free and initialization memset's must be 2106 * kept together to avoid discrepancies in behavior. 2107 * 2108 * The initialization memset's clear the object and the metadata, 2109 * but don't touch the SLAB redzone. 2110 */ 2111 if (unlikely(init)) { 2112 int rsize; 2113 2114 if (!kasan_has_integrated_init()) 2115 memset(kasan_reset_tag(x), 0, s->object_size); 2116 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 2117 memset((char *)kasan_reset_tag(x) + s->inuse, 0, 2118 s->size - s->inuse - rsize); 2119 } 2120 /* KASAN might put x into memory quarantine, delaying its reuse. 
*/ 2121 return !kasan_slab_free(s, x, init); 2122 } 2123 2124 static inline bool slab_free_freelist_hook(struct kmem_cache *s, 2125 void **head, void **tail, 2126 int *cnt) 2127 { 2128 2129 void *object; 2130 void *next = *head; 2131 void *old_tail = *tail; 2132 bool init; 2133 2134 if (is_kfence_address(next)) { 2135 slab_free_hook(s, next, false); 2136 return false; 2137 } 2138 2139 /* Head and tail of the reconstructed freelist */ 2140 *head = NULL; 2141 *tail = NULL; 2142 2143 init = slab_want_init_on_free(s); 2144 2145 do { 2146 object = next; 2147 next = get_freepointer(s, object); 2148 2149 /* If object's reuse doesn't have to be delayed */ 2150 if (likely(slab_free_hook(s, object, init))) { 2151 /* Move object to the new freelist */ 2152 set_freepointer(s, object, *head); 2153 *head = object; 2154 if (!*tail) 2155 *tail = object; 2156 } else { 2157 /* 2158 * Adjust the reconstructed freelist depth 2159 * accordingly if object's reuse is delayed. 2160 */ 2161 --(*cnt); 2162 } 2163 } while (object != old_tail); 2164 2165 return *head != NULL; 2166 } 2167 2168 static void *setup_object(struct kmem_cache *s, void *object) 2169 { 2170 setup_object_debug(s, object); 2171 object = kasan_init_slab_obj(s, object); 2172 if (unlikely(s->ctor)) { 2173 kasan_unpoison_new_object(s, object); 2174 s->ctor(object); 2175 kasan_poison_new_object(s, object); 2176 } 2177 return object; 2178 } 2179 2180 /* 2181 * Slab allocation and freeing 2182 */ 2183 static inline struct slab *alloc_slab_page(gfp_t flags, int node, 2184 struct kmem_cache_order_objects oo) 2185 { 2186 struct folio *folio; 2187 struct slab *slab; 2188 unsigned int order = oo_order(oo); 2189 2190 folio = (struct folio *)alloc_pages_node(node, flags, order); 2191 if (!folio) 2192 return NULL; 2193 2194 slab = folio_slab(folio); 2195 __folio_set_slab(folio); 2196 /* Make the flag visible before any changes to folio->mapping */ 2197 smp_wmb(); 2198 if (folio_is_pfmemalloc(folio)) 2199 slab_set_pfmemalloc(slab); 2200 2201 return slab; 2202 } 2203 2204 #ifdef CONFIG_SLAB_FREELIST_RANDOM 2205 /* Pre-initialize the random sequence cache */ 2206 static int init_cache_random_seq(struct kmem_cache *s) 2207 { 2208 unsigned int count = oo_objects(s->oo); 2209 int err; 2210 2211 /* Bailout if already initialised */ 2212 if (s->random_seq) 2213 return 0; 2214 2215 err = cache_random_seq_create(s, count, GFP_KERNEL); 2216 if (err) { 2217 pr_err("SLUB: Unable to initialize free list for %s\n", 2218 s->name); 2219 return err; 2220 } 2221 2222 /* Transform to an offset on the set of pages */ 2223 if (s->random_seq) { 2224 unsigned int i; 2225 2226 for (i = 0; i < count; i++) 2227 s->random_seq[i] *= s->size; 2228 } 2229 return 0; 2230 } 2231 2232 /* Initialize each random sequence freelist per cache */ 2233 static void __init init_freelist_randomization(void) 2234 { 2235 struct kmem_cache *s; 2236 2237 mutex_lock(&slab_mutex); 2238 2239 list_for_each_entry(s, &slab_caches, list) 2240 init_cache_random_seq(s); 2241 2242 mutex_unlock(&slab_mutex); 2243 } 2244 2245 /* Get the next entry on the pre-computed freelist randomized */ 2246 static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab, 2247 unsigned long *pos, void *start, 2248 unsigned long page_limit, 2249 unsigned long freelist_count) 2250 { 2251 unsigned int idx; 2252 2253 /* 2254 * If the target page allocation failed, the number of objects on the 2255 * page might be smaller than the usual size defined by the cache. 
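 *
 * In short: random_seq[] holds byte offsets (index * s->size), *pos wraps
 * around at freelist_count, and offsets at or beyond page_limit
 * (slab->objects * s->size) are skipped so a short slab never yields an
 * out-of-range object.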
2256 */ 2257 do { 2258 idx = s->random_seq[*pos]; 2259 *pos += 1; 2260 if (*pos >= freelist_count) 2261 *pos = 0; 2262 } while (unlikely(idx >= page_limit)); 2263 2264 return (char *)start + idx; 2265 } 2266 2267 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 2268 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2269 { 2270 void *start; 2271 void *cur; 2272 void *next; 2273 unsigned long idx, pos, page_limit, freelist_count; 2274 2275 if (slab->objects < 2 || !s->random_seq) 2276 return false; 2277 2278 freelist_count = oo_objects(s->oo); 2279 pos = get_random_u32_below(freelist_count); 2280 2281 page_limit = slab->objects * s->size; 2282 start = fixup_red_left(s, slab_address(slab)); 2283 2284 /* First entry is used as the base of the freelist */ 2285 cur = next_freelist_entry(s, slab, &pos, start, page_limit, 2286 freelist_count); 2287 cur = setup_object(s, cur); 2288 slab->freelist = cur; 2289 2290 for (idx = 1; idx < slab->objects; idx++) { 2291 next = next_freelist_entry(s, slab, &pos, start, page_limit, 2292 freelist_count); 2293 next = setup_object(s, next); 2294 set_freepointer(s, cur, next); 2295 cur = next; 2296 } 2297 set_freepointer(s, cur, NULL); 2298 2299 return true; 2300 } 2301 #else 2302 static inline int init_cache_random_seq(struct kmem_cache *s) 2303 { 2304 return 0; 2305 } 2306 static inline void init_freelist_randomization(void) { } 2307 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2308 { 2309 return false; 2310 } 2311 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 2312 2313 static __always_inline void account_slab(struct slab *slab, int order, 2314 struct kmem_cache *s, gfp_t gfp) 2315 { 2316 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) 2317 memcg_alloc_slab_cgroups(slab, s, gfp, true); 2318 2319 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2320 PAGE_SIZE << order); 2321 } 2322 2323 static __always_inline void unaccount_slab(struct slab *slab, int order, 2324 struct kmem_cache *s) 2325 { 2326 if (memcg_kmem_online()) 2327 memcg_free_slab_cgroups(slab); 2328 2329 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2330 -(PAGE_SIZE << order)); 2331 } 2332 2333 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 2334 { 2335 struct slab *slab; 2336 struct kmem_cache_order_objects oo = s->oo; 2337 gfp_t alloc_gfp; 2338 void *start, *p, *next; 2339 int idx; 2340 bool shuffle; 2341 2342 flags &= gfp_allowed_mask; 2343 2344 flags |= s->allocflags; 2345 2346 /* 2347 * Let the initial higher-order allocation fail under memory pressure 2348 * so we fall-back to the minimum order allocation. 2349 */ 2350 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 2351 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 2352 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; 2353 2354 slab = alloc_slab_page(alloc_gfp, node, oo); 2355 if (unlikely(!slab)) { 2356 oo = s->min; 2357 alloc_gfp = flags; 2358 /* 2359 * Allocation may have failed due to fragmentation. 
2360 * Try a lower order alloc if possible 2361 */ 2362 slab = alloc_slab_page(alloc_gfp, node, oo); 2363 if (unlikely(!slab)) 2364 return NULL; 2365 stat(s, ORDER_FALLBACK); 2366 } 2367 2368 slab->objects = oo_objects(oo); 2369 slab->inuse = 0; 2370 slab->frozen = 0; 2371 2372 account_slab(slab, oo_order(oo), s, flags); 2373 2374 slab->slab_cache = s; 2375 2376 kasan_poison_slab(slab); 2377 2378 start = slab_address(slab); 2379 2380 setup_slab_debug(s, slab, start); 2381 2382 shuffle = shuffle_freelist(s, slab); 2383 2384 if (!shuffle) { 2385 start = fixup_red_left(s, start); 2386 start = setup_object(s, start); 2387 slab->freelist = start; 2388 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 2389 next = p + s->size; 2390 next = setup_object(s, next); 2391 set_freepointer(s, p, next); 2392 p = next; 2393 } 2394 set_freepointer(s, p, NULL); 2395 } 2396 2397 return slab; 2398 } 2399 2400 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 2401 { 2402 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2403 flags = kmalloc_fix_flags(flags); 2404 2405 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2406 2407 return allocate_slab(s, 2408 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 2409 } 2410 2411 static void __free_slab(struct kmem_cache *s, struct slab *slab) 2412 { 2413 struct folio *folio = slab_folio(slab); 2414 int order = folio_order(folio); 2415 int pages = 1 << order; 2416 2417 __slab_clear_pfmemalloc(slab); 2418 folio->mapping = NULL; 2419 /* Make the mapping reset visible before clearing the flag */ 2420 smp_wmb(); 2421 __folio_clear_slab(folio); 2422 mm_account_reclaimed_pages(pages); 2423 unaccount_slab(slab, order, s); 2424 __free_pages(&folio->page, order); 2425 } 2426 2427 static void rcu_free_slab(struct rcu_head *h) 2428 { 2429 struct slab *slab = container_of(h, struct slab, rcu_head); 2430 2431 __free_slab(slab->slab_cache, slab); 2432 } 2433 2434 static void free_slab(struct kmem_cache *s, struct slab *slab) 2435 { 2436 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 2437 void *p; 2438 2439 slab_pad_check(s, slab); 2440 for_each_object(p, s, slab_address(slab), slab->objects) 2441 check_object(s, slab, p, SLUB_RED_INACTIVE); 2442 } 2443 2444 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) 2445 call_rcu(&slab->rcu_head, rcu_free_slab); 2446 else 2447 __free_slab(s, slab); 2448 } 2449 2450 static void discard_slab(struct kmem_cache *s, struct slab *slab) 2451 { 2452 dec_slabs_node(s, slab_nid(slab), slab->objects); 2453 free_slab(s, slab); 2454 } 2455 2456 /* 2457 * SLUB reuses PG_workingset bit to keep track of whether it's on 2458 * the per-node partial list. 2459 */ 2460 static inline bool slab_test_node_partial(const struct slab *slab) 2461 { 2462 return folio_test_workingset((struct folio *)slab_folio(slab)); 2463 } 2464 2465 static inline void slab_set_node_partial(struct slab *slab) 2466 { 2467 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2468 } 2469 2470 static inline void slab_clear_node_partial(struct slab *slab) 2471 { 2472 clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2473 } 2474 2475 /* 2476 * Management of partially allocated slabs. 
2477 */ 2478 static inline void 2479 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 2480 { 2481 n->nr_partial++; 2482 if (tail == DEACTIVATE_TO_TAIL) 2483 list_add_tail(&slab->slab_list, &n->partial); 2484 else 2485 list_add(&slab->slab_list, &n->partial); 2486 slab_set_node_partial(slab); 2487 } 2488 2489 static inline void add_partial(struct kmem_cache_node *n, 2490 struct slab *slab, int tail) 2491 { 2492 lockdep_assert_held(&n->list_lock); 2493 __add_partial(n, slab, tail); 2494 } 2495 2496 static inline void remove_partial(struct kmem_cache_node *n, 2497 struct slab *slab) 2498 { 2499 lockdep_assert_held(&n->list_lock); 2500 list_del(&slab->slab_list); 2501 slab_clear_node_partial(slab); 2502 n->nr_partial--; 2503 } 2504 2505 /* 2506 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a 2507 * slab from the n->partial list. Remove only a single object from the slab, do 2508 * the alloc_debug_processing() checks and leave the slab on the list, or move 2509 * it to full list if it was the last free object. 2510 */ 2511 static void *alloc_single_from_partial(struct kmem_cache *s, 2512 struct kmem_cache_node *n, struct slab *slab, int orig_size) 2513 { 2514 void *object; 2515 2516 lockdep_assert_held(&n->list_lock); 2517 2518 object = slab->freelist; 2519 slab->freelist = get_freepointer(s, object); 2520 slab->inuse++; 2521 2522 if (!alloc_debug_processing(s, slab, object, orig_size)) { 2523 remove_partial(n, slab); 2524 return NULL; 2525 } 2526 2527 if (slab->inuse == slab->objects) { 2528 remove_partial(n, slab); 2529 add_full(s, n, slab); 2530 } 2531 2532 return object; 2533 } 2534 2535 /* 2536 * Called only for kmem_cache_debug() caches to allocate from a freshly 2537 * allocated slab. Allocate a single object instead of whole freelist 2538 * and put the slab to the partial (or full) list. 2539 */ 2540 static void *alloc_single_from_new_slab(struct kmem_cache *s, 2541 struct slab *slab, int orig_size) 2542 { 2543 int nid = slab_nid(slab); 2544 struct kmem_cache_node *n = get_node(s, nid); 2545 unsigned long flags; 2546 void *object; 2547 2548 2549 object = slab->freelist; 2550 slab->freelist = get_freepointer(s, object); 2551 slab->inuse = 1; 2552 2553 if (!alloc_debug_processing(s, slab, object, orig_size)) 2554 /* 2555 * It's not really expected that this would fail on a 2556 * freshly allocated slab, but a concurrent memory 2557 * corruption in theory could cause that. 2558 */ 2559 return NULL; 2560 2561 spin_lock_irqsave(&n->list_lock, flags); 2562 2563 if (slab->inuse == slab->objects) 2564 add_full(s, n, slab); 2565 else 2566 add_partial(n, slab, DEACTIVATE_TO_HEAD); 2567 2568 inc_slabs_node(s, nid, slab->objects); 2569 spin_unlock_irqrestore(&n->list_lock, flags); 2570 2571 return object; 2572 } 2573 2574 #ifdef CONFIG_SLUB_CPU_PARTIAL 2575 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 2576 #else 2577 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 2578 int drain) { } 2579 #endif 2580 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 2581 2582 /* 2583 * Try to allocate a partial slab from a specific node. 2584 */ 2585 static struct slab *get_partial_node(struct kmem_cache *s, 2586 struct kmem_cache_node *n, 2587 struct partial_context *pc) 2588 { 2589 struct slab *slab, *slab2, *partial = NULL; 2590 unsigned long flags; 2591 unsigned int partial_slabs = 0; 2592 2593 /* 2594 * Racy check. 
If we mistakenly see no partial slabs then we 2595 * just allocate an empty slab. If we mistakenly try to get a 2596 * partial slab and there is none available then get_partial() 2597 * will return NULL. 2598 */ 2599 if (!n || !n->nr_partial) 2600 return NULL; 2601 2602 spin_lock_irqsave(&n->list_lock, flags); 2603 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 2604 if (!pfmemalloc_match(slab, pc->flags)) 2605 continue; 2606 2607 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 2608 void *object = alloc_single_from_partial(s, n, slab, 2609 pc->orig_size); 2610 if (object) { 2611 partial = slab; 2612 pc->object = object; 2613 break; 2614 } 2615 continue; 2616 } 2617 2618 remove_partial(n, slab); 2619 2620 if (!partial) { 2621 partial = slab; 2622 stat(s, ALLOC_FROM_PARTIAL); 2623 } else { 2624 put_cpu_partial(s, slab, 0); 2625 stat(s, CPU_PARTIAL_NODE); 2626 partial_slabs++; 2627 } 2628 #ifdef CONFIG_SLUB_CPU_PARTIAL 2629 if (!kmem_cache_has_cpu_partial(s) 2630 || partial_slabs > s->cpu_partial_slabs / 2) 2631 break; 2632 #else 2633 break; 2634 #endif 2635 2636 } 2637 spin_unlock_irqrestore(&n->list_lock, flags); 2638 return partial; 2639 } 2640 2641 /* 2642 * Get a slab from somewhere. Search in increasing NUMA distances. 2643 */ 2644 static struct slab *get_any_partial(struct kmem_cache *s, 2645 struct partial_context *pc) 2646 { 2647 #ifdef CONFIG_NUMA 2648 struct zonelist *zonelist; 2649 struct zoneref *z; 2650 struct zone *zone; 2651 enum zone_type highest_zoneidx = gfp_zone(pc->flags); 2652 struct slab *slab; 2653 unsigned int cpuset_mems_cookie; 2654 2655 /* 2656 * The defrag ratio allows a configuration of the tradeoffs between 2657 * inter node defragmentation and node local allocations. A lower 2658 * defrag_ratio increases the tendency to do local allocations 2659 * instead of attempting to obtain partial slabs from other nodes. 2660 * 2661 * If the defrag_ratio is set to 0 then kmalloc() always 2662 * returns node local objects. If the ratio is higher then kmalloc() 2663 * may return off node objects because partial slabs are obtained 2664 * from other nodes and filled up. 2665 * 2666 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2667 * (which makes defrag_ratio = 1000) then every (well almost) 2668 * allocation will first attempt to defrag slab caches on other nodes. 2669 * This means scanning over all nodes to look for partial slabs which 2670 * may be expensive if we do it every time we are trying to find a slab 2671 * with available objects. 2672 */ 2673 if (!s->remote_node_defrag_ratio || 2674 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2675 return NULL; 2676 2677 do { 2678 cpuset_mems_cookie = read_mems_allowed_begin(); 2679 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); 2680 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2681 struct kmem_cache_node *n; 2682 2683 n = get_node(s, zone_to_nid(zone)); 2684 2685 if (n && cpuset_zone_allowed(zone, pc->flags) && 2686 n->nr_partial > s->min_partial) { 2687 slab = get_partial_node(s, n, pc); 2688 if (slab) { 2689 /* 2690 * Don't check read_mems_allowed_retry() 2691 * here - if mems_allowed was updated in 2692 * parallel, that was a harmless race 2693 * between allocation and the cpuset 2694 * update 2695 */ 2696 return slab; 2697 } 2698 } 2699 } 2700 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2701 #endif /* CONFIG_NUMA */ 2702 return NULL; 2703 } 2704 2705 /* 2706 * Get a partial slab, lock it and return it. 
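 *
 * Search order, in short: the local (or explicitly requested) node is
 * tried first via get_partial_node(); only a NUMA_NO_NODE request falls
 * back to other nodes through get_any_partial(), subject to
 * remote_node_defrag_ratio.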
2707 */ 2708 static struct slab *get_partial(struct kmem_cache *s, int node, 2709 struct partial_context *pc) 2710 { 2711 struct slab *slab; 2712 int searchnode = node; 2713 2714 if (node == NUMA_NO_NODE) 2715 searchnode = numa_mem_id(); 2716 2717 slab = get_partial_node(s, get_node(s, searchnode), pc); 2718 if (slab || node != NUMA_NO_NODE) 2719 return slab; 2720 2721 return get_any_partial(s, pc); 2722 } 2723 2724 #ifndef CONFIG_SLUB_TINY 2725 2726 #ifdef CONFIG_PREEMPTION 2727 /* 2728 * Calculate the next globally unique transaction for disambiguation 2729 * during cmpxchg. The transactions start with the cpu number and are then 2730 * incremented by CONFIG_NR_CPUS. 2731 */ 2732 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2733 #else 2734 /* 2735 * No preemption supported therefore also no need to check for 2736 * different cpus. 2737 */ 2738 #define TID_STEP 1 2739 #endif /* CONFIG_PREEMPTION */ 2740 2741 static inline unsigned long next_tid(unsigned long tid) 2742 { 2743 return tid + TID_STEP; 2744 } 2745 2746 #ifdef SLUB_DEBUG_CMPXCHG 2747 static inline unsigned int tid_to_cpu(unsigned long tid) 2748 { 2749 return tid % TID_STEP; 2750 } 2751 2752 static inline unsigned long tid_to_event(unsigned long tid) 2753 { 2754 return tid / TID_STEP; 2755 } 2756 #endif 2757 2758 static inline unsigned int init_tid(int cpu) 2759 { 2760 return cpu; 2761 } 2762 2763 static inline void note_cmpxchg_failure(const char *n, 2764 const struct kmem_cache *s, unsigned long tid) 2765 { 2766 #ifdef SLUB_DEBUG_CMPXCHG 2767 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2768 2769 pr_info("%s %s: cmpxchg redo ", n, s->name); 2770 2771 #ifdef CONFIG_PREEMPTION 2772 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2773 pr_warn("due to cpu change %d -> %d\n", 2774 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2775 else 2776 #endif 2777 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2778 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2779 tid_to_event(tid), tid_to_event(actual_tid)); 2780 else 2781 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2782 actual_tid, tid, next_tid(tid)); 2783 #endif 2784 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2785 } 2786 2787 static void init_kmem_cache_cpus(struct kmem_cache *s) 2788 { 2789 int cpu; 2790 struct kmem_cache_cpu *c; 2791 2792 for_each_possible_cpu(cpu) { 2793 c = per_cpu_ptr(s->cpu_slab, cpu); 2794 local_lock_init(&c->lock); 2795 c->tid = init_tid(cpu); 2796 } 2797 } 2798 2799 /* 2800 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist, 2801 * unfreezes the slabs and puts it on the proper list. 2802 * Assumes the slab has been already safely taken away from kmem_cache_cpu 2803 * by the caller. 2804 */ 2805 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, 2806 void *freelist) 2807 { 2808 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 2809 int free_delta = 0; 2810 void *nextfree, *freelist_iter, *freelist_tail; 2811 int tail = DEACTIVATE_TO_HEAD; 2812 unsigned long flags = 0; 2813 struct slab new; 2814 struct slab old; 2815 2816 if (slab->freelist) { 2817 stat(s, DEACTIVATE_REMOTE_FREES); 2818 tail = DEACTIVATE_TO_TAIL; 2819 } 2820 2821 /* 2822 * Stage one: Count the objects on cpu's freelist as free_delta and 2823 * remember the last object in freelist_tail for later splicing. 
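 *
 * Illustration (object names hypothetical): a cpu freelist A->B->C
 * spliced in front of a slab freelist X->Y yields A->B->C->X->Y in
 * stage two, with free_delta == 3 subtracted from the slab's inuse.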
2824 */ 2825 freelist_tail = NULL; 2826 freelist_iter = freelist; 2827 while (freelist_iter) { 2828 nextfree = get_freepointer(s, freelist_iter); 2829 2830 /* 2831 * If 'nextfree' is invalid, it is possible that the object at 2832 * 'freelist_iter' is already corrupted. So isolate all objects 2833 * starting at 'freelist_iter' by skipping them. 2834 */ 2835 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) 2836 break; 2837 2838 freelist_tail = freelist_iter; 2839 free_delta++; 2840 2841 freelist_iter = nextfree; 2842 } 2843 2844 /* 2845 * Stage two: Unfreeze the slab while splicing the per-cpu 2846 * freelist to the head of slab's freelist. 2847 */ 2848 do { 2849 old.freelist = READ_ONCE(slab->freelist); 2850 old.counters = READ_ONCE(slab->counters); 2851 VM_BUG_ON(!old.frozen); 2852 2853 /* Determine target state of the slab */ 2854 new.counters = old.counters; 2855 new.frozen = 0; 2856 if (freelist_tail) { 2857 new.inuse -= free_delta; 2858 set_freepointer(s, freelist_tail, old.freelist); 2859 new.freelist = freelist; 2860 } else { 2861 new.freelist = old.freelist; 2862 } 2863 } while (!slab_update_freelist(s, slab, 2864 old.freelist, old.counters, 2865 new.freelist, new.counters, 2866 "unfreezing slab")); 2867 2868 /* 2869 * Stage three: Manipulate the slab list based on the updated state. 2870 */ 2871 if (!new.inuse && n->nr_partial >= s->min_partial) { 2872 stat(s, DEACTIVATE_EMPTY); 2873 discard_slab(s, slab); 2874 stat(s, FREE_SLAB); 2875 } else if (new.freelist) { 2876 spin_lock_irqsave(&n->list_lock, flags); 2877 add_partial(n, slab, tail); 2878 spin_unlock_irqrestore(&n->list_lock, flags); 2879 stat(s, tail); 2880 } else { 2881 stat(s, DEACTIVATE_FULL); 2882 } 2883 } 2884 2885 #ifdef CONFIG_SLUB_CPU_PARTIAL 2886 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) 2887 { 2888 struct kmem_cache_node *n = NULL, *n2 = NULL; 2889 struct slab *slab, *slab_to_discard = NULL; 2890 unsigned long flags = 0; 2891 2892 while (partial_slab) { 2893 slab = partial_slab; 2894 partial_slab = slab->next; 2895 2896 n2 = get_node(s, slab_nid(slab)); 2897 if (n != n2) { 2898 if (n) 2899 spin_unlock_irqrestore(&n->list_lock, flags); 2900 2901 n = n2; 2902 spin_lock_irqsave(&n->list_lock, flags); 2903 } 2904 2905 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { 2906 slab->next = slab_to_discard; 2907 slab_to_discard = slab; 2908 } else { 2909 add_partial(n, slab, DEACTIVATE_TO_TAIL); 2910 stat(s, FREE_ADD_PARTIAL); 2911 } 2912 } 2913 2914 if (n) 2915 spin_unlock_irqrestore(&n->list_lock, flags); 2916 2917 while (slab_to_discard) { 2918 slab = slab_to_discard; 2919 slab_to_discard = slab_to_discard->next; 2920 2921 stat(s, DEACTIVATE_EMPTY); 2922 discard_slab(s, slab); 2923 stat(s, FREE_SLAB); 2924 } 2925 } 2926 2927 /* 2928 * Put all the cpu partial slabs to the node partial list. 
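 *
 * The percpu partial list is detached under the local lock and then
 * processed outside of it by __put_partials(); empty slabs whose node
 * already holds at least s->min_partial partial slabs are discarded
 * rather than re-added.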
2929 */ 2930 static void put_partials(struct kmem_cache *s) 2931 { 2932 struct slab *partial_slab; 2933 unsigned long flags; 2934 2935 local_lock_irqsave(&s->cpu_slab->lock, flags); 2936 partial_slab = this_cpu_read(s->cpu_slab->partial); 2937 this_cpu_write(s->cpu_slab->partial, NULL); 2938 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2939 2940 if (partial_slab) 2941 __put_partials(s, partial_slab); 2942 } 2943 2944 static void put_partials_cpu(struct kmem_cache *s, 2945 struct kmem_cache_cpu *c) 2946 { 2947 struct slab *partial_slab; 2948 2949 partial_slab = slub_percpu_partial(c); 2950 c->partial = NULL; 2951 2952 if (partial_slab) 2953 __put_partials(s, partial_slab); 2954 } 2955 2956 /* 2957 * Put a slab into a partial slab slot if available. 2958 * 2959 * If we did not find a slot then simply move all the partials to the 2960 * per node partial list. 2961 */ 2962 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 2963 { 2964 struct slab *oldslab; 2965 struct slab *slab_to_put = NULL; 2966 unsigned long flags; 2967 int slabs = 0; 2968 2969 local_lock_irqsave(&s->cpu_slab->lock, flags); 2970 2971 oldslab = this_cpu_read(s->cpu_slab->partial); 2972 2973 if (oldslab) { 2974 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 2975 /* 2976 * Partial array is full. Move the existing set to the 2977 * per node partial list. Postpone the actual unfreezing 2978 * outside of the critical section. 2979 */ 2980 slab_to_put = oldslab; 2981 oldslab = NULL; 2982 } else { 2983 slabs = oldslab->slabs; 2984 } 2985 } 2986 2987 slabs++; 2988 2989 slab->slabs = slabs; 2990 slab->next = oldslab; 2991 2992 this_cpu_write(s->cpu_slab->partial, slab); 2993 2994 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2995 2996 if (slab_to_put) { 2997 __put_partials(s, slab_to_put); 2998 stat(s, CPU_PARTIAL_DRAIN); 2999 } 3000 } 3001 3002 #else /* CONFIG_SLUB_CPU_PARTIAL */ 3003 3004 static inline void put_partials(struct kmem_cache *s) { } 3005 static inline void put_partials_cpu(struct kmem_cache *s, 3006 struct kmem_cache_cpu *c) { } 3007 3008 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 3009 3010 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 3011 { 3012 unsigned long flags; 3013 struct slab *slab; 3014 void *freelist; 3015 3016 local_lock_irqsave(&s->cpu_slab->lock, flags); 3017 3018 slab = c->slab; 3019 freelist = c->freelist; 3020 3021 c->slab = NULL; 3022 c->freelist = NULL; 3023 c->tid = next_tid(c->tid); 3024 3025 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3026 3027 if (slab) { 3028 deactivate_slab(s, slab, freelist); 3029 stat(s, CPUSLAB_FLUSH); 3030 } 3031 } 3032 3033 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 3034 { 3035 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3036 void *freelist = c->freelist; 3037 struct slab *slab = c->slab; 3038 3039 c->slab = NULL; 3040 c->freelist = NULL; 3041 c->tid = next_tid(c->tid); 3042 3043 if (slab) { 3044 deactivate_slab(s, slab, freelist); 3045 stat(s, CPUSLAB_FLUSH); 3046 } 3047 3048 put_partials_cpu(s, c); 3049 } 3050 3051 struct slub_flush_work { 3052 struct work_struct work; 3053 struct kmem_cache *s; 3054 bool skip; 3055 }; 3056 3057 /* 3058 * Flush cpu slab. 3059 * 3060 * Called from CPU work handler with migration disabled. 
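 *
 * flush_all() queues this handler, serialized by flush_lock, on every
 * online CPU for which has_cpu_slab() reports a cpu slab or percpu
 * partial slabs, and then waits for each queued work item to finish.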
3061 */ 3062 static void flush_cpu_slab(struct work_struct *w) 3063 { 3064 struct kmem_cache *s; 3065 struct kmem_cache_cpu *c; 3066 struct slub_flush_work *sfw; 3067 3068 sfw = container_of(w, struct slub_flush_work, work); 3069 3070 s = sfw->s; 3071 c = this_cpu_ptr(s->cpu_slab); 3072 3073 if (c->slab) 3074 flush_slab(s, c); 3075 3076 put_partials(s); 3077 } 3078 3079 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 3080 { 3081 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3082 3083 return c->slab || slub_percpu_partial(c); 3084 } 3085 3086 static DEFINE_MUTEX(flush_lock); 3087 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush); 3088 3089 static void flush_all_cpus_locked(struct kmem_cache *s) 3090 { 3091 struct slub_flush_work *sfw; 3092 unsigned int cpu; 3093 3094 lockdep_assert_cpus_held(); 3095 mutex_lock(&flush_lock); 3096 3097 for_each_online_cpu(cpu) { 3098 sfw = &per_cpu(slub_flush, cpu); 3099 if (!has_cpu_slab(cpu, s)) { 3100 sfw->skip = true; 3101 continue; 3102 } 3103 INIT_WORK(&sfw->work, flush_cpu_slab); 3104 sfw->skip = false; 3105 sfw->s = s; 3106 queue_work_on(cpu, flushwq, &sfw->work); 3107 } 3108 3109 for_each_online_cpu(cpu) { 3110 sfw = &per_cpu(slub_flush, cpu); 3111 if (sfw->skip) 3112 continue; 3113 flush_work(&sfw->work); 3114 } 3115 3116 mutex_unlock(&flush_lock); 3117 } 3118 3119 static void flush_all(struct kmem_cache *s) 3120 { 3121 cpus_read_lock(); 3122 flush_all_cpus_locked(s); 3123 cpus_read_unlock(); 3124 } 3125 3126 /* 3127 * Use the cpu notifier to insure that the cpu slabs are flushed when 3128 * necessary. 3129 */ 3130 static int slub_cpu_dead(unsigned int cpu) 3131 { 3132 struct kmem_cache *s; 3133 3134 mutex_lock(&slab_mutex); 3135 list_for_each_entry(s, &slab_caches, list) 3136 __flush_cpu_slab(s, cpu); 3137 mutex_unlock(&slab_mutex); 3138 return 0; 3139 } 3140 3141 #else /* CONFIG_SLUB_TINY */ 3142 static inline void flush_all_cpus_locked(struct kmem_cache *s) { } 3143 static inline void flush_all(struct kmem_cache *s) { } 3144 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } 3145 static inline int slub_cpu_dead(unsigned int cpu) { return 0; } 3146 #endif /* CONFIG_SLUB_TINY */ 3147 3148 /* 3149 * Check if the objects in a per cpu structure fit numa 3150 * locality expectations. 
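 *
 * In short: a NUMA_NO_NODE request always matches, otherwise the slab
 * must live on the requested node; without CONFIG_NUMA this is always 1.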
3151 */ 3152 static inline int node_match(struct slab *slab, int node) 3153 { 3154 #ifdef CONFIG_NUMA 3155 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 3156 return 0; 3157 #endif 3158 return 1; 3159 } 3160 3161 #ifdef CONFIG_SLUB_DEBUG 3162 static int count_free(struct slab *slab) 3163 { 3164 return slab->objects - slab->inuse; 3165 } 3166 3167 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 3168 { 3169 return atomic_long_read(&n->total_objects); 3170 } 3171 3172 /* Supports checking bulk free of a constructed freelist */ 3173 static inline bool free_debug_processing(struct kmem_cache *s, 3174 struct slab *slab, void *head, void *tail, int *bulk_cnt, 3175 unsigned long addr, depot_stack_handle_t handle) 3176 { 3177 bool checks_ok = false; 3178 void *object = head; 3179 int cnt = 0; 3180 3181 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3182 if (!check_slab(s, slab)) 3183 goto out; 3184 } 3185 3186 if (slab->inuse < *bulk_cnt) { 3187 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", 3188 slab->inuse, *bulk_cnt); 3189 goto out; 3190 } 3191 3192 next_object: 3193 3194 if (++cnt > *bulk_cnt) 3195 goto out_cnt; 3196 3197 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3198 if (!free_consistency_checks(s, slab, object, addr)) 3199 goto out; 3200 } 3201 3202 if (s->flags & SLAB_STORE_USER) 3203 set_track_update(s, object, TRACK_FREE, addr, handle); 3204 trace(s, slab, object, 0); 3205 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 3206 init_object(s, object, SLUB_RED_INACTIVE); 3207 3208 /* Reached end of constructed freelist yet? */ 3209 if (object != tail) { 3210 object = get_freepointer(s, object); 3211 goto next_object; 3212 } 3213 checks_ok = true; 3214 3215 out_cnt: 3216 if (cnt != *bulk_cnt) { 3217 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", 3218 *bulk_cnt, cnt); 3219 *bulk_cnt = cnt; 3220 } 3221 3222 out: 3223 3224 if (!checks_ok) 3225 slab_fix(s, "Object at 0x%p not freed", object); 3226 3227 return checks_ok; 3228 } 3229 #endif /* CONFIG_SLUB_DEBUG */ 3230 3231 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) 3232 static unsigned long count_partial(struct kmem_cache_node *n, 3233 int (*get_count)(struct slab *)) 3234 { 3235 unsigned long flags; 3236 unsigned long x = 0; 3237 struct slab *slab; 3238 3239 spin_lock_irqsave(&n->list_lock, flags); 3240 list_for_each_entry(slab, &n->partial, slab_list) 3241 x += get_count(slab); 3242 spin_unlock_irqrestore(&n->list_lock, flags); 3243 return x; 3244 } 3245 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ 3246 3247 #ifdef CONFIG_SLUB_DEBUG 3248 static noinline void 3249 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 3250 { 3251 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 3252 DEFAULT_RATELIMIT_BURST); 3253 int node; 3254 struct kmem_cache_node *n; 3255 3256 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 3257 return; 3258 3259 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 3260 nid, gfpflags, &gfpflags); 3261 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 3262 s->name, s->object_size, s->size, oo_order(s->oo), 3263 oo_order(s->min)); 3264 3265 if (oo_order(s->min) > get_order(s->object_size)) 3266 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n", 3267 s->name); 3268 3269 for_each_kmem_cache_node(s, node, n) { 3270 unsigned long nr_slabs; 3271 unsigned long nr_objs; 3272 unsigned 
long nr_free; 3273 3274 nr_free = count_partial(n, count_free); 3275 nr_slabs = node_nr_slabs(n); 3276 nr_objs = node_nr_objs(n); 3277 3278 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 3279 node, nr_slabs, nr_objs, nr_free); 3280 } 3281 } 3282 #else /* CONFIG_SLUB_DEBUG */ 3283 static inline void 3284 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } 3285 #endif 3286 3287 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) 3288 { 3289 if (unlikely(slab_test_pfmemalloc(slab))) 3290 return gfp_pfmemalloc_allowed(gfpflags); 3291 3292 return true; 3293 } 3294 3295 #ifndef CONFIG_SLUB_TINY 3296 static inline bool 3297 __update_cpu_freelist_fast(struct kmem_cache *s, 3298 void *freelist_old, void *freelist_new, 3299 unsigned long tid) 3300 { 3301 freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; 3302 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; 3303 3304 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, 3305 &old.full, new.full); 3306 } 3307 3308 /* 3309 * Check the slab->freelist and either transfer the freelist to the 3310 * per cpu freelist or deactivate the slab. 3311 * 3312 * The slab is still frozen if the return value is not NULL. 3313 * 3314 * If this function returns NULL then the slab has been unfrozen. 3315 */ 3316 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 3317 { 3318 struct slab new; 3319 unsigned long counters; 3320 void *freelist; 3321 3322 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3323 3324 do { 3325 freelist = slab->freelist; 3326 counters = slab->counters; 3327 3328 new.counters = counters; 3329 VM_BUG_ON(!new.frozen); 3330 3331 new.inuse = slab->objects; 3332 new.frozen = freelist != NULL; 3333 3334 } while (!__slab_update_freelist(s, slab, 3335 freelist, counters, 3336 NULL, new.counters, 3337 "get_freelist")); 3338 3339 return freelist; 3340 } 3341 3342 /* 3343 * Freeze the partial slab and return the pointer to the freelist. 3344 */ 3345 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) 3346 { 3347 struct slab new; 3348 unsigned long counters; 3349 void *freelist; 3350 3351 do { 3352 freelist = slab->freelist; 3353 counters = slab->counters; 3354 3355 new.counters = counters; 3356 VM_BUG_ON(new.frozen); 3357 3358 new.inuse = slab->objects; 3359 new.frozen = 1; 3360 3361 } while (!slab_update_freelist(s, slab, 3362 freelist, counters, 3363 NULL, new.counters, 3364 "freeze_slab")); 3365 3366 return freelist; 3367 } 3368 3369 /* 3370 * Slow path. The lockless freelist is empty or we need to perform 3371 * debugging duties. 3372 * 3373 * Processing is still very fast if new objects have been freed to the 3374 * regular freelist. In that case we simply take over the regular freelist 3375 * as the lockless freelist and zap the regular freelist. 3376 * 3377 * If that is not working then we fall back to the partial lists. We take the 3378 * first element of the freelist as the object to allocate now and move the 3379 * rest of the freelist to the lockless freelist. 3380 * 3381 * And if we were unable to get a new slab from the partial slab lists then 3382 * we need to allocate a new slab. This is the slowest path since it involves 3383 * a call to the page allocator and the setup of a new slab. 3384 * 3385 * Version of __slab_alloc to use when we know that preemption is 3386 * already disabled (which is the case for bulk allocation). 
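 *
 * Rough map of the labels below: reread_slab re-reads c->slab after a
 * possible preemption, deactivate_slab returns the cpu slab to the node
 * lists, new_slab/new_objects try percpu partials, node partial lists
 * and finally the page allocator, and retry_load_slab/load_freelist
 * install the chosen slab as the cpu slab and hand out its first object.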
3387 */ 3388 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3389 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3390 { 3391 void *freelist; 3392 struct slab *slab; 3393 unsigned long flags; 3394 struct partial_context pc; 3395 3396 stat(s, ALLOC_SLOWPATH); 3397 3398 reread_slab: 3399 3400 slab = READ_ONCE(c->slab); 3401 if (!slab) { 3402 /* 3403 * if the node is not online or has no normal memory, just 3404 * ignore the node constraint 3405 */ 3406 if (unlikely(node != NUMA_NO_NODE && 3407 !node_isset(node, slab_nodes))) 3408 node = NUMA_NO_NODE; 3409 goto new_slab; 3410 } 3411 3412 if (unlikely(!node_match(slab, node))) { 3413 /* 3414 * same as above but node_match() being false already 3415 * implies node != NUMA_NO_NODE 3416 */ 3417 if (!node_isset(node, slab_nodes)) { 3418 node = NUMA_NO_NODE; 3419 } else { 3420 stat(s, ALLOC_NODE_MISMATCH); 3421 goto deactivate_slab; 3422 } 3423 } 3424 3425 /* 3426 * By rights, we should be searching for a slab page that was 3427 * PFMEMALLOC but right now, we are losing the pfmemalloc 3428 * information when the page leaves the per-cpu allocator 3429 */ 3430 if (unlikely(!pfmemalloc_match(slab, gfpflags))) 3431 goto deactivate_slab; 3432 3433 /* must check again c->slab in case we got preempted and it changed */ 3434 local_lock_irqsave(&s->cpu_slab->lock, flags); 3435 if (unlikely(slab != c->slab)) { 3436 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3437 goto reread_slab; 3438 } 3439 freelist = c->freelist; 3440 if (freelist) 3441 goto load_freelist; 3442 3443 freelist = get_freelist(s, slab); 3444 3445 if (!freelist) { 3446 c->slab = NULL; 3447 c->tid = next_tid(c->tid); 3448 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3449 stat(s, DEACTIVATE_BYPASS); 3450 goto new_slab; 3451 } 3452 3453 stat(s, ALLOC_REFILL); 3454 3455 load_freelist: 3456 3457 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3458 3459 /* 3460 * freelist is pointing to the list of objects to be used. 3461 * slab is pointing to the slab from which the objects are obtained. 3462 * That slab must be frozen for per cpu allocations to work. 
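 *
 * The first object is returned to the caller; c->freelist is advanced
 * to the next object and the tid is bumped so that concurrent fastpath
 * attempts on this cpu notice the change and retry.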
3463 */ 3464 VM_BUG_ON(!c->slab->frozen); 3465 c->freelist = get_freepointer(s, freelist); 3466 c->tid = next_tid(c->tid); 3467 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3468 return freelist; 3469 3470 deactivate_slab: 3471 3472 local_lock_irqsave(&s->cpu_slab->lock, flags); 3473 if (slab != c->slab) { 3474 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3475 goto reread_slab; 3476 } 3477 freelist = c->freelist; 3478 c->slab = NULL; 3479 c->freelist = NULL; 3480 c->tid = next_tid(c->tid); 3481 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3482 deactivate_slab(s, slab, freelist); 3483 3484 new_slab: 3485 3486 #ifdef CONFIG_SLUB_CPU_PARTIAL 3487 while (slub_percpu_partial(c)) { 3488 local_lock_irqsave(&s->cpu_slab->lock, flags); 3489 if (unlikely(c->slab)) { 3490 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3491 goto reread_slab; 3492 } 3493 if (unlikely(!slub_percpu_partial(c))) { 3494 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3495 /* we were preempted and partial list got empty */ 3496 goto new_objects; 3497 } 3498 3499 slab = slub_percpu_partial(c); 3500 slub_set_percpu_partial(c, slab); 3501 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3502 stat(s, CPU_PARTIAL_ALLOC); 3503 3504 if (unlikely(!node_match(slab, node) || 3505 !pfmemalloc_match(slab, gfpflags))) { 3506 slab->next = NULL; 3507 __put_partials(s, slab); 3508 continue; 3509 } 3510 3511 freelist = freeze_slab(s, slab); 3512 goto retry_load_slab; 3513 } 3514 #endif 3515 3516 new_objects: 3517 3518 pc.flags = gfpflags; 3519 pc.orig_size = orig_size; 3520 slab = get_partial(s, node, &pc); 3521 if (slab) { 3522 if (kmem_cache_debug(s)) { 3523 freelist = pc.object; 3524 /* 3525 * For debug caches here we had to go through 3526 * alloc_single_from_partial() so just store the 3527 * tracking info and return the object. 3528 */ 3529 if (s->flags & SLAB_STORE_USER) 3530 set_track(s, freelist, TRACK_ALLOC, addr); 3531 3532 return freelist; 3533 } 3534 3535 freelist = freeze_slab(s, slab); 3536 goto retry_load_slab; 3537 } 3538 3539 slub_put_cpu_ptr(s->cpu_slab); 3540 slab = new_slab(s, gfpflags, node); 3541 c = slub_get_cpu_ptr(s->cpu_slab); 3542 3543 if (unlikely(!slab)) { 3544 slab_out_of_memory(s, gfpflags, node); 3545 return NULL; 3546 } 3547 3548 stat(s, ALLOC_SLAB); 3549 3550 if (kmem_cache_debug(s)) { 3551 freelist = alloc_single_from_new_slab(s, slab, orig_size); 3552 3553 if (unlikely(!freelist)) 3554 goto new_objects; 3555 3556 if (s->flags & SLAB_STORE_USER) 3557 set_track(s, freelist, TRACK_ALLOC, addr); 3558 3559 return freelist; 3560 } 3561 3562 /* 3563 * No other reference to the slab yet so we can 3564 * muck around with it freely without cmpxchg 3565 */ 3566 freelist = slab->freelist; 3567 slab->freelist = NULL; 3568 slab->inuse = slab->objects; 3569 slab->frozen = 1; 3570 3571 inc_slabs_node(s, slab_nid(slab), slab->objects); 3572 3573 if (unlikely(!pfmemalloc_match(slab, gfpflags))) { 3574 /* 3575 * For !pfmemalloc_match() case we don't load freelist so that 3576 * we don't make further mismatched allocations easier. 
3577 */ 3578 deactivate_slab(s, slab, get_freepointer(s, freelist)); 3579 return freelist; 3580 } 3581 3582 retry_load_slab: 3583 3584 local_lock_irqsave(&s->cpu_slab->lock, flags); 3585 if (unlikely(c->slab)) { 3586 void *flush_freelist = c->freelist; 3587 struct slab *flush_slab = c->slab; 3588 3589 c->slab = NULL; 3590 c->freelist = NULL; 3591 c->tid = next_tid(c->tid); 3592 3593 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3594 3595 deactivate_slab(s, flush_slab, flush_freelist); 3596 3597 stat(s, CPUSLAB_FLUSH); 3598 3599 goto retry_load_slab; 3600 } 3601 c->slab = slab; 3602 3603 goto load_freelist; 3604 } 3605 3606 /* 3607 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 3608 * disabled. Compensates for possible cpu changes by refetching the per cpu area 3609 * pointer. 3610 */ 3611 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3612 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3613 { 3614 void *p; 3615 3616 #ifdef CONFIG_PREEMPT_COUNT 3617 /* 3618 * We may have been preempted and rescheduled on a different 3619 * cpu before disabling preemption. Need to reload cpu area 3620 * pointer. 3621 */ 3622 c = slub_get_cpu_ptr(s->cpu_slab); 3623 #endif 3624 3625 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); 3626 #ifdef CONFIG_PREEMPT_COUNT 3627 slub_put_cpu_ptr(s->cpu_slab); 3628 #endif 3629 return p; 3630 } 3631 3632 static __always_inline void *__slab_alloc_node(struct kmem_cache *s, 3633 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3634 { 3635 struct kmem_cache_cpu *c; 3636 struct slab *slab; 3637 unsigned long tid; 3638 void *object; 3639 3640 redo: 3641 /* 3642 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 3643 * enabled. We may switch back and forth between cpus while 3644 * reading from one cpu area. That does not matter as long 3645 * as we end up on the original cpu again when doing the cmpxchg. 3646 * 3647 * We must guarantee that tid and kmem_cache_cpu are retrieved on the 3648 * same cpu. We read first the kmem_cache_cpu pointer and use it to read 3649 * the tid. If we are preempted and switched to another cpu between the 3650 * two reads, it's OK as the two are still associated with the same cpu 3651 * and cmpxchg later will validate the cpu. 3652 */ 3653 c = raw_cpu_ptr(s->cpu_slab); 3654 tid = READ_ONCE(c->tid); 3655 3656 /* 3657 * Irqless object alloc/free algorithm used here depends on sequence 3658 * of fetching cpu_slab's data. tid should be fetched before anything 3659 * on c to guarantee that object and slab associated with previous tid 3660 * won't be used with current tid. If we fetch tid first, object and 3661 * slab could be one associated with next tid and our alloc/free 3662 * request will be failed. In this case, we will retry. So, no problem. 3663 */ 3664 barrier(); 3665 3666 /* 3667 * The transaction ids are globally unique per cpu and per operation on 3668 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double 3669 * occurs on the right processor and that there was no operation on the 3670 * linked list in between. 
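 *
 * Worked example (the TID_STEP value is hypothetical): with TID_STEP
 * of 4, cpu 2 uses tids 2, 6, 10, ...; a mismatch at cmpxchg time means
 * either a migration to another cpu (tid % TID_STEP changed) or another
 * alloc/free on this cpu in between (tid / TID_STEP changed), and the
 * fastpath simply retries.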
3671 */ 3672 3673 object = c->freelist; 3674 slab = c->slab; 3675 3676 if (!USE_LOCKLESS_FAST_PATH() || 3677 unlikely(!object || !slab || !node_match(slab, node))) { 3678 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); 3679 } else { 3680 void *next_object = get_freepointer_safe(s, object); 3681 3682 /* 3683 * The cmpxchg will only match if there was no additional 3684 * operation and if we are on the right processor. 3685 * 3686 * The cmpxchg does the following atomically (without lock 3687 * semantics!) 3688 * 1. Relocate first pointer to the current per cpu area. 3689 * 2. Verify that tid and freelist have not been changed 3690 * 3. If they were not changed replace tid and freelist 3691 * 3692 * Since this is without lock semantics the protection is only 3693 * against code executing on this cpu *not* from access by 3694 * other cpus. 3695 */ 3696 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { 3697 note_cmpxchg_failure("slab_alloc", s, tid); 3698 goto redo; 3699 } 3700 prefetch_freepointer(s, next_object); 3701 stat(s, ALLOC_FASTPATH); 3702 } 3703 3704 return object; 3705 } 3706 #else /* CONFIG_SLUB_TINY */ 3707 static void *__slab_alloc_node(struct kmem_cache *s, 3708 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3709 { 3710 struct partial_context pc; 3711 struct slab *slab; 3712 void *object; 3713 3714 pc.flags = gfpflags; 3715 pc.orig_size = orig_size; 3716 slab = get_partial(s, node, &pc); 3717 3718 if (slab) 3719 return pc.object; 3720 3721 slab = new_slab(s, gfpflags, node); 3722 if (unlikely(!slab)) { 3723 slab_out_of_memory(s, gfpflags, node); 3724 return NULL; 3725 } 3726 3727 object = alloc_single_from_new_slab(s, slab, orig_size); 3728 3729 return object; 3730 } 3731 #endif /* CONFIG_SLUB_TINY */ 3732 3733 /* 3734 * If the object has been wiped upon free, make sure it's fully initialized by 3735 * zeroing out freelist pointer. 3736 */ 3737 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 3738 void *obj) 3739 { 3740 if (unlikely(slab_want_init_on_free(s)) && obj) 3741 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 3742 0, sizeof(void *)); 3743 } 3744 3745 noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags) 3746 { 3747 if (__should_failslab(s, gfpflags)) 3748 return -ENOMEM; 3749 return 0; 3750 } 3751 ALLOW_ERROR_INJECTION(should_failslab, ERRNO); 3752 3753 static __fastpath_inline 3754 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, 3755 struct list_lru *lru, 3756 struct obj_cgroup **objcgp, 3757 size_t size, gfp_t flags) 3758 { 3759 flags &= gfp_allowed_mask; 3760 3761 might_alloc(flags); 3762 3763 if (unlikely(should_failslab(s, flags))) 3764 return NULL; 3765 3766 if (unlikely(!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))) 3767 return NULL; 3768 3769 return s; 3770 } 3771 3772 static __fastpath_inline 3773 void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, 3774 gfp_t flags, size_t size, void **p, bool init, 3775 unsigned int orig_size) 3776 { 3777 unsigned int zero_size = s->object_size; 3778 bool kasan_init = init; 3779 size_t i; 3780 gfp_t init_flags = flags & gfp_allowed_mask; 3781 3782 /* 3783 * For kmalloc object, the allocated memory size(object_size) is likely 3784 * larger than the requested size(orig_size). If redzone check is 3785 * enabled for the extra space, don't zero it, as it will be redzoned 3786 * soon. 
The redzone operation for this extra space could be seen as a 3787 * replacement of current poisoning under certain debug option, and 3788 * won't break other sanity checks. 3789 */ 3790 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) && 3791 (s->flags & SLAB_KMALLOC)) 3792 zero_size = orig_size; 3793 3794 /* 3795 * When slub_debug is enabled, avoid memory initialization integrated 3796 * into KASAN and instead zero out the memory via the memset below with 3797 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and 3798 * cause false-positive reports. This does not lead to a performance 3799 * penalty on production builds, as slub_debug is not intended to be 3800 * enabled there. 3801 */ 3802 if (__slub_debug_enabled()) 3803 kasan_init = false; 3804 3805 /* 3806 * As memory initialization might be integrated into KASAN, 3807 * kasan_slab_alloc and initialization memset must be 3808 * kept together to avoid discrepancies in behavior. 3809 * 3810 * As p[i] might get tagged, memset and kmemleak hook come after KASAN. 3811 */ 3812 for (i = 0; i < size; i++) { 3813 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init); 3814 if (p[i] && init && (!kasan_init || 3815 !kasan_has_integrated_init())) 3816 memset(p[i], 0, zero_size); 3817 kmemleak_alloc_recursive(p[i], s->object_size, 1, 3818 s->flags, init_flags); 3819 kmsan_slab_alloc(s, p[i], init_flags); 3820 } 3821 3822 memcg_slab_post_alloc_hook(s, objcg, flags, size, p); 3823 } 3824 3825 /* 3826 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 3827 * have the fastpath folded into their functions. So no function call 3828 * overhead for requests that can be satisfied on the fastpath. 3829 * 3830 * The fastpath works by first checking if the lockless freelist can be used. 3831 * If not then __slab_alloc is called for slow processing. 3832 * 3833 * Otherwise we can simply pick the next object from the lockless free list. 
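 *
 * Caller-side sketch of the wrappers built on this fastpath (the cache
 * and structure names are purely illustrative):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);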
3834 */ 3835 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, 3836 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3837 { 3838 void *object; 3839 struct obj_cgroup *objcg = NULL; 3840 bool init = false; 3841 3842 s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags); 3843 if (unlikely(!s)) 3844 return NULL; 3845 3846 object = kfence_alloc(s, orig_size, gfpflags); 3847 if (unlikely(object)) 3848 goto out; 3849 3850 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); 3851 3852 maybe_wipe_obj_freeptr(s, object); 3853 init = slab_want_init_on_alloc(gfpflags, s); 3854 3855 out: 3856 /* 3857 * When init equals 'true', like for kzalloc() family, only 3858 * @orig_size bytes might be zeroed instead of s->object_size 3859 */ 3860 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size); 3861 3862 return object; 3863 } 3864 3865 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 3866 { 3867 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, 3868 s->object_size); 3869 3870 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 3871 3872 return ret; 3873 } 3874 EXPORT_SYMBOL(kmem_cache_alloc); 3875 3876 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, 3877 gfp_t gfpflags) 3878 { 3879 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, 3880 s->object_size); 3881 3882 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 3883 3884 return ret; 3885 } 3886 EXPORT_SYMBOL(kmem_cache_alloc_lru); 3887 3888 /** 3889 * kmem_cache_alloc_node - Allocate an object on the specified node 3890 * @s: The cache to allocate from. 3891 * @gfpflags: See kmalloc(). 3892 * @node: node number of the target node. 3893 * 3894 * Identical to kmem_cache_alloc but it will allocate memory on the given 3895 * node, which can improve the performance for cpu bound structures. 3896 * 3897 * Fallback to other node is possible if __GFP_THISNODE is not set. 3898 * 3899 * Return: pointer to the new object or %NULL in case of error 3900 */ 3901 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 3902 { 3903 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); 3904 3905 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); 3906 3907 return ret; 3908 } 3909 EXPORT_SYMBOL(kmem_cache_alloc_node); 3910 3911 /* 3912 * To avoid unnecessary overhead, we pass through large allocation requests 3913 * directly to the page allocator. We use __GFP_COMP, because we will need to 3914 * know the allocation order to free the pages properly in kfree. 3915 */ 3916 static void *__kmalloc_large_node(size_t size, gfp_t flags, int node) 3917 { 3918 struct folio *folio; 3919 void *ptr = NULL; 3920 unsigned int order = get_order(size); 3921 3922 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 3923 flags = kmalloc_fix_flags(flags); 3924 3925 flags |= __GFP_COMP; 3926 folio = (struct folio *)alloc_pages_node(node, flags, order); 3927 if (folio) { 3928 ptr = folio_address(folio); 3929 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 3930 PAGE_SIZE << order); 3931 } 3932 3933 ptr = kasan_kmalloc_large(ptr, size, flags); 3934 /* As ptr might get tagged, call kmemleak hook after KASAN. 
*/ 3935 kmemleak_alloc(ptr, size, 1, flags); 3936 kmsan_kmalloc_large(ptr, size, flags); 3937 3938 return ptr; 3939 } 3940 3941 void *kmalloc_large(size_t size, gfp_t flags) 3942 { 3943 void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE); 3944 3945 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 3946 flags, NUMA_NO_NODE); 3947 return ret; 3948 } 3949 EXPORT_SYMBOL(kmalloc_large); 3950 3951 void *kmalloc_large_node(size_t size, gfp_t flags, int node) 3952 { 3953 void *ret = __kmalloc_large_node(size, flags, node); 3954 3955 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 3956 flags, node); 3957 return ret; 3958 } 3959 EXPORT_SYMBOL(kmalloc_large_node); 3960 3961 static __always_inline 3962 void *__do_kmalloc_node(size_t size, gfp_t flags, int node, 3963 unsigned long caller) 3964 { 3965 struct kmem_cache *s; 3966 void *ret; 3967 3968 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 3969 ret = __kmalloc_large_node(size, flags, node); 3970 trace_kmalloc(caller, ret, size, 3971 PAGE_SIZE << get_order(size), flags, node); 3972 return ret; 3973 } 3974 3975 if (unlikely(!size)) 3976 return ZERO_SIZE_PTR; 3977 3978 s = kmalloc_slab(size, flags, caller); 3979 3980 ret = slab_alloc_node(s, NULL, flags, node, caller, size); 3981 ret = kasan_kmalloc(s, ret, size, flags); 3982 trace_kmalloc(caller, ret, size, s->size, flags, node); 3983 return ret; 3984 } 3985 3986 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3987 { 3988 return __do_kmalloc_node(size, flags, node, _RET_IP_); 3989 } 3990 EXPORT_SYMBOL(__kmalloc_node); 3991 3992 void *__kmalloc(size_t size, gfp_t flags) 3993 { 3994 return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_); 3995 } 3996 EXPORT_SYMBOL(__kmalloc); 3997 3998 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3999 int node, unsigned long caller) 4000 { 4001 return __do_kmalloc_node(size, flags, node, caller); 4002 } 4003 EXPORT_SYMBOL(__kmalloc_node_track_caller); 4004 4005 void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 4006 { 4007 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, 4008 _RET_IP_, size); 4009 4010 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); 4011 4012 ret = kasan_kmalloc(s, ret, size, gfpflags); 4013 return ret; 4014 } 4015 EXPORT_SYMBOL(kmalloc_trace); 4016 4017 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, 4018 int node, size_t size) 4019 { 4020 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); 4021 4022 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); 4023 4024 ret = kasan_kmalloc(s, ret, size, gfpflags); 4025 return ret; 4026 } 4027 EXPORT_SYMBOL(kmalloc_node_trace); 4028 4029 static noinline void free_to_partial_list( 4030 struct kmem_cache *s, struct slab *slab, 4031 void *head, void *tail, int bulk_cnt, 4032 unsigned long addr) 4033 { 4034 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 4035 struct slab *slab_free = NULL; 4036 int cnt = bulk_cnt; 4037 unsigned long flags; 4038 depot_stack_handle_t handle = 0; 4039 4040 if (s->flags & SLAB_STORE_USER) 4041 handle = set_track_prepare(); 4042 4043 spin_lock_irqsave(&n->list_lock, flags); 4044 4045 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { 4046 void *prior = slab->freelist; 4047 4048 /* Perform the actual freeing while we still hold the locks */ 4049 slab->inuse -= cnt; 4050 set_freepointer(s, tail, prior); 4051 slab->freelist = head; 4052 4053 /* 4054 * If the slab is empty, and node's partial list is 
full, 4055 * it should be discarded anyway no matter it's on full or 4056 * partial list. 4057 */ 4058 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) 4059 slab_free = slab; 4060 4061 if (!prior) { 4062 /* was on full list */ 4063 remove_full(s, n, slab); 4064 if (!slab_free) { 4065 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4066 stat(s, FREE_ADD_PARTIAL); 4067 } 4068 } else if (slab_free) { 4069 remove_partial(n, slab); 4070 stat(s, FREE_REMOVE_PARTIAL); 4071 } 4072 } 4073 4074 if (slab_free) { 4075 /* 4076 * Update the counters while still holding n->list_lock to 4077 * prevent spurious validation warnings 4078 */ 4079 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); 4080 } 4081 4082 spin_unlock_irqrestore(&n->list_lock, flags); 4083 4084 if (slab_free) { 4085 stat(s, FREE_SLAB); 4086 free_slab(s, slab_free); 4087 } 4088 } 4089 4090 /* 4091 * Slow path handling. This may still be called frequently since objects 4092 * have a longer lifetime than the cpu slabs in most processing loads. 4093 * 4094 * So we still attempt to reduce cache line usage. Just take the slab 4095 * lock and free the item. If there is no additional partial slab 4096 * handling required then we can return immediately. 4097 */ 4098 static void __slab_free(struct kmem_cache *s, struct slab *slab, 4099 void *head, void *tail, int cnt, 4100 unsigned long addr) 4101 4102 { 4103 void *prior; 4104 int was_frozen; 4105 struct slab new; 4106 unsigned long counters; 4107 struct kmem_cache_node *n = NULL; 4108 unsigned long flags; 4109 bool on_node_partial; 4110 4111 stat(s, FREE_SLOWPATH); 4112 4113 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 4114 free_to_partial_list(s, slab, head, tail, cnt, addr); 4115 return; 4116 } 4117 4118 do { 4119 if (unlikely(n)) { 4120 spin_unlock_irqrestore(&n->list_lock, flags); 4121 n = NULL; 4122 } 4123 prior = slab->freelist; 4124 counters = slab->counters; 4125 set_freepointer(s, tail, prior); 4126 new.counters = counters; 4127 was_frozen = new.frozen; 4128 new.inuse -= cnt; 4129 if ((!new.inuse || !prior) && !was_frozen) { 4130 /* Needs to be taken off a list */ 4131 if (!kmem_cache_has_cpu_partial(s) || prior) { 4132 4133 n = get_node(s, slab_nid(slab)); 4134 /* 4135 * Speculatively acquire the list_lock. 4136 * If the cmpxchg does not succeed then we may 4137 * drop the list_lock without any processing. 4138 * 4139 * Otherwise the list_lock will synchronize with 4140 * other processors updating the list of slabs. 4141 */ 4142 spin_lock_irqsave(&n->list_lock, flags); 4143 4144 on_node_partial = slab_test_node_partial(slab); 4145 } 4146 } 4147 4148 } while (!slab_update_freelist(s, slab, 4149 prior, counters, 4150 head, new.counters, 4151 "__slab_free")); 4152 4153 if (likely(!n)) { 4154 4155 if (likely(was_frozen)) { 4156 /* 4157 * The list lock was not taken therefore no list 4158 * activity can be necessary. 4159 */ 4160 stat(s, FREE_FROZEN); 4161 } else if (kmem_cache_has_cpu_partial(s) && !prior) { 4162 /* 4163 * If we started with a full slab then put it onto the 4164 * per cpu partial list. 4165 */ 4166 put_cpu_partial(s, slab, 1); 4167 stat(s, CPU_PARTIAL_FREE); 4168 } 4169 4170 return; 4171 } 4172 4173 /* 4174 * This slab was partially empty but not on the per-node partial list, 4175 * in which case we shouldn't manipulate its list, just return. 
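 * (Typically such a slab sits on some cpu's partial list, or is in the
 * middle of being taken over by a cpu as its next active slab; either
 * way that cpu owns the list state, so the free itself is all that is
 * needed here.)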
4176 */ 4177 if (prior && !on_node_partial) { 4178 spin_unlock_irqrestore(&n->list_lock, flags); 4179 return; 4180 } 4181 4182 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 4183 goto slab_empty; 4184 4185 /* 4186 * Objects left in the slab. If it was not on the partial list before 4187 * then add it. 4188 */ 4189 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 4190 remove_full(s, n, slab); 4191 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4192 stat(s, FREE_ADD_PARTIAL); 4193 } 4194 spin_unlock_irqrestore(&n->list_lock, flags); 4195 return; 4196 4197 slab_empty: 4198 if (prior) { 4199 /* 4200 * Slab on the partial list. 4201 */ 4202 remove_partial(n, slab); 4203 stat(s, FREE_REMOVE_PARTIAL); 4204 } else { 4205 /* Slab must be on the full list */ 4206 remove_full(s, n, slab); 4207 } 4208 4209 spin_unlock_irqrestore(&n->list_lock, flags); 4210 stat(s, FREE_SLAB); 4211 discard_slab(s, slab); 4212 } 4213 4214 #ifndef CONFIG_SLUB_TINY 4215 /* 4216 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 4217 * can perform fastpath freeing without additional function calls. 4218 * 4219 * The fastpath is only possible if we are freeing to the current cpu slab 4220 * of this processor. This is typically the case if we have just allocated 4221 * the item before. 4222 * 4223 * If fastpath is not possible then fall back to __slab_free where we deal 4224 * with all sorts of special processing. 4225 * 4226 * Bulk free of a freelist with several objects (all pointing to the 4227 * same slab) is possible by specifying head and tail ptr, plus objects 4228 * count (cnt). Bulk free is indicated by the tail pointer being set. 4229 */ 4230 static __always_inline void do_slab_free(struct kmem_cache *s, 4231 struct slab *slab, void *head, void *tail, 4232 int cnt, unsigned long addr) 4233 { 4234 struct kmem_cache_cpu *c; 4235 unsigned long tid; 4236 void **freelist; 4237 4238 redo: 4239 /* 4240 * Determine the current cpu's per cpu slab. 4241 * The cpu may change afterward. However that does not matter since 4242 * data is retrieved via this pointer. If we are on the same cpu 4243 * during the cmpxchg then the free will succeed.
4244 */ 4245 c = raw_cpu_ptr(s->cpu_slab); 4246 tid = READ_ONCE(c->tid); 4247 4248 /* Same with comment on barrier() in slab_alloc_node() */ 4249 barrier(); 4250 4251 if (unlikely(slab != c->slab)) { 4252 __slab_free(s, slab, head, tail, cnt, addr); 4253 return; 4254 } 4255 4256 if (USE_LOCKLESS_FAST_PATH()) { 4257 freelist = READ_ONCE(c->freelist); 4258 4259 set_freepointer(s, tail, freelist); 4260 4261 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { 4262 note_cmpxchg_failure("slab_free", s, tid); 4263 goto redo; 4264 } 4265 } else { 4266 /* Update the free list under the local lock */ 4267 local_lock(&s->cpu_slab->lock); 4268 c = this_cpu_ptr(s->cpu_slab); 4269 if (unlikely(slab != c->slab)) { 4270 local_unlock(&s->cpu_slab->lock); 4271 goto redo; 4272 } 4273 tid = c->tid; 4274 freelist = c->freelist; 4275 4276 set_freepointer(s, tail, freelist); 4277 c->freelist = head; 4278 c->tid = next_tid(tid); 4279 4280 local_unlock(&s->cpu_slab->lock); 4281 } 4282 stat_add(s, FREE_FASTPATH, cnt); 4283 } 4284 #else /* CONFIG_SLUB_TINY */ 4285 static void do_slab_free(struct kmem_cache *s, 4286 struct slab *slab, void *head, void *tail, 4287 int cnt, unsigned long addr) 4288 { 4289 __slab_free(s, slab, head, tail, cnt, addr); 4290 } 4291 #endif /* CONFIG_SLUB_TINY */ 4292 4293 static __fastpath_inline 4294 void slab_free(struct kmem_cache *s, struct slab *slab, void *object, 4295 unsigned long addr) 4296 { 4297 memcg_slab_free_hook(s, slab, &object, 1); 4298 4299 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s)))) 4300 do_slab_free(s, slab, object, object, 1, addr); 4301 } 4302 4303 static __fastpath_inline 4304 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, 4305 void *tail, void **p, int cnt, unsigned long addr) 4306 { 4307 memcg_slab_free_hook(s, slab, p, cnt); 4308 /* 4309 * With KASAN enabled slab_free_freelist_hook modifies the freelist 4310 * to remove objects, whose reuse must be delayed. 4311 */ 4312 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) 4313 do_slab_free(s, slab, head, tail, cnt, addr); 4314 } 4315 4316 #ifdef CONFIG_KASAN_GENERIC 4317 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 4318 { 4319 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr); 4320 } 4321 #endif 4322 4323 static inline struct kmem_cache *virt_to_cache(const void *obj) 4324 { 4325 struct slab *slab; 4326 4327 slab = virt_to_slab(obj); 4328 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) 4329 return NULL; 4330 return slab->slab_cache; 4331 } 4332 4333 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) 4334 { 4335 struct kmem_cache *cachep; 4336 4337 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && 4338 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) 4339 return s; 4340 4341 cachep = virt_to_cache(x); 4342 if (WARN(cachep && cachep != s, 4343 "%s: Wrong slab cache. %s but object is from %s\n", 4344 __func__, s->name, cachep->name)) 4345 print_tracking(cachep, x); 4346 return cachep; 4347 } 4348 4349 /** 4350 * kmem_cache_free - Deallocate an object 4351 * @s: The cache the allocation was from. 4352 * @x: The previously allocated object. 4353 * 4354 * Free an object which was previously allocated from this 4355 * cache. 
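 *
 * A minimal usage sketch (illustrative only; "my_cache" and "obj" are
 * made-up names, not kernel symbols):
 *
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(my_cache, obj);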
4356 */ 4357 void kmem_cache_free(struct kmem_cache *s, void *x) 4358 { 4359 s = cache_from_obj(s, x); 4360 if (!s) 4361 return; 4362 trace_kmem_cache_free(_RET_IP_, x, s); 4363 slab_free(s, virt_to_slab(x), x, _RET_IP_); 4364 } 4365 EXPORT_SYMBOL(kmem_cache_free); 4366 4367 static void free_large_kmalloc(struct folio *folio, void *object) 4368 { 4369 unsigned int order = folio_order(folio); 4370 4371 if (WARN_ON_ONCE(order == 0)) 4372 pr_warn_once("object pointer: 0x%p\n", object); 4373 4374 kmemleak_free(object); 4375 kasan_kfree_large(object); 4376 kmsan_kfree_large(object); 4377 4378 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4379 -(PAGE_SIZE << order)); 4380 folio_put(folio); 4381 } 4382 4383 /** 4384 * kfree - free previously allocated memory 4385 * @object: pointer returned by kmalloc() or kmem_cache_alloc() 4386 * 4387 * If @object is NULL, no operation is performed. 4388 */ 4389 void kfree(const void *object) 4390 { 4391 struct folio *folio; 4392 struct slab *slab; 4393 struct kmem_cache *s; 4394 void *x = (void *)object; 4395 4396 trace_kfree(_RET_IP_, object); 4397 4398 if (unlikely(ZERO_OR_NULL_PTR(object))) 4399 return; 4400 4401 folio = virt_to_folio(object); 4402 if (unlikely(!folio_test_slab(folio))) { 4403 free_large_kmalloc(folio, (void *)object); 4404 return; 4405 } 4406 4407 slab = folio_slab(folio); 4408 s = slab->slab_cache; 4409 slab_free(s, slab, x, _RET_IP_); 4410 } 4411 EXPORT_SYMBOL(kfree); 4412 4413 struct detached_freelist { 4414 struct slab *slab; 4415 void *tail; 4416 void *freelist; 4417 int cnt; 4418 struct kmem_cache *s; 4419 }; 4420 4421 /* 4422 * This function progressively scans the array with free objects (with 4423 * a limited look ahead) and extracts objects belonging to the same 4424 * slab. It builds a detached freelist directly within the given 4425 * slab/objects. This can happen without any need for 4426 * synchronization, because the objects are owned by the running process. 4427 * The freelist is built up as a single linked list in the objects. 4428 * The idea is that this detached freelist can then be bulk 4429 * transferred to the real freelist(s), but only requiring a single 4430 * synchronization primitive. Look ahead in the array is limited due 4431 * to performance reasons.
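 *
 * For example (illustrative): given p = {A1, B1, A2, A3}, where the
 * "A" objects share a slab, one call links A1 -> A2 -> A3 into the
 * detached freelist (df->cnt == 3), compacts the array so that only B1
 * remains to be processed, and returns 1; the next call then handles
 * B1 on its own.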
4432 */ 4433 static inline 4434 int build_detached_freelist(struct kmem_cache *s, size_t size, 4435 void **p, struct detached_freelist *df) 4436 { 4437 int lookahead = 3; 4438 void *object; 4439 struct folio *folio; 4440 size_t same; 4441 4442 object = p[--size]; 4443 folio = virt_to_folio(object); 4444 if (!s) { 4445 /* Handle kalloc'ed objects */ 4446 if (unlikely(!folio_test_slab(folio))) { 4447 free_large_kmalloc(folio, object); 4448 df->slab = NULL; 4449 return size; 4450 } 4451 /* Derive kmem_cache from object */ 4452 df->slab = folio_slab(folio); 4453 df->s = df->slab->slab_cache; 4454 } else { 4455 df->slab = folio_slab(folio); 4456 df->s = cache_from_obj(s, object); /* Support for memcg */ 4457 } 4458 4459 /* Start new detached freelist */ 4460 df->tail = object; 4461 df->freelist = object; 4462 df->cnt = 1; 4463 4464 if (is_kfence_address(object)) 4465 return size; 4466 4467 set_freepointer(df->s, object, NULL); 4468 4469 same = size; 4470 while (size) { 4471 object = p[--size]; 4472 /* df->slab is always set at this point */ 4473 if (df->slab == virt_to_slab(object)) { 4474 /* Opportunity build freelist */ 4475 set_freepointer(df->s, object, df->freelist); 4476 df->freelist = object; 4477 df->cnt++; 4478 same--; 4479 if (size != same) 4480 swap(p[size], p[same]); 4481 continue; 4482 } 4483 4484 /* Limit look ahead search */ 4485 if (!--lookahead) 4486 break; 4487 } 4488 4489 return same; 4490 } 4491 4492 /* 4493 * Internal bulk free of objects that were not initialised by the post alloc 4494 * hooks and thus should not be processed by the free hooks 4495 */ 4496 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4497 { 4498 if (!size) 4499 return; 4500 4501 do { 4502 struct detached_freelist df; 4503 4504 size = build_detached_freelist(s, size, p, &df); 4505 if (!df.slab) 4506 continue; 4507 4508 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, 4509 _RET_IP_); 4510 } while (likely(size)); 4511 } 4512 4513 /* Note that interrupts must be enabled when calling this function. */ 4514 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4515 { 4516 if (!size) 4517 return; 4518 4519 do { 4520 struct detached_freelist df; 4521 4522 size = build_detached_freelist(s, size, p, &df); 4523 if (!df.slab) 4524 continue; 4525 4526 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size], 4527 df.cnt, _RET_IP_); 4528 } while (likely(size)); 4529 } 4530 EXPORT_SYMBOL(kmem_cache_free_bulk); 4531 4532 #ifndef CONFIG_SLUB_TINY 4533 static inline 4534 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 4535 void **p) 4536 { 4537 struct kmem_cache_cpu *c; 4538 unsigned long irqflags; 4539 int i; 4540 4541 /* 4542 * Drain objects in the per cpu slab, while disabling local 4543 * IRQs, which protects against PREEMPT and interrupts 4544 * handlers invoking normal fastpath. 4545 */ 4546 c = slub_get_cpu_ptr(s->cpu_slab); 4547 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4548 4549 for (i = 0; i < size; i++) { 4550 void *object = kfence_alloc(s, s->object_size, flags); 4551 4552 if (unlikely(object)) { 4553 p[i] = object; 4554 continue; 4555 } 4556 4557 object = c->freelist; 4558 if (unlikely(!object)) { 4559 /* 4560 * We may have removed an object from c->freelist using 4561 * the fastpath in the previous iteration; in that case, 4562 * c->tid has not been bumped yet. 4563 * Since ___slab_alloc() may reenable interrupts while 4564 * allocating memory, we should bump c->tid now. 
4565 */ 4566 c->tid = next_tid(c->tid); 4567 4568 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4569 4570 /* 4571 * Invoking slow path likely have side-effect 4572 * of re-populating per CPU c->freelist 4573 */ 4574 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 4575 _RET_IP_, c, s->object_size); 4576 if (unlikely(!p[i])) 4577 goto error; 4578 4579 c = this_cpu_ptr(s->cpu_slab); 4580 maybe_wipe_obj_freeptr(s, p[i]); 4581 4582 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4583 4584 continue; /* goto for-loop */ 4585 } 4586 c->freelist = get_freepointer(s, object); 4587 p[i] = object; 4588 maybe_wipe_obj_freeptr(s, p[i]); 4589 stat(s, ALLOC_FASTPATH); 4590 } 4591 c->tid = next_tid(c->tid); 4592 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4593 slub_put_cpu_ptr(s->cpu_slab); 4594 4595 return i; 4596 4597 error: 4598 slub_put_cpu_ptr(s->cpu_slab); 4599 __kmem_cache_free_bulk(s, i, p); 4600 return 0; 4601 4602 } 4603 #else /* CONFIG_SLUB_TINY */ 4604 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 4605 size_t size, void **p) 4606 { 4607 int i; 4608 4609 for (i = 0; i < size; i++) { 4610 void *object = kfence_alloc(s, s->object_size, flags); 4611 4612 if (unlikely(object)) { 4613 p[i] = object; 4614 continue; 4615 } 4616 4617 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, 4618 _RET_IP_, s->object_size); 4619 if (unlikely(!p[i])) 4620 goto error; 4621 4622 maybe_wipe_obj_freeptr(s, p[i]); 4623 } 4624 4625 return i; 4626 4627 error: 4628 __kmem_cache_free_bulk(s, i, p); 4629 return 0; 4630 } 4631 #endif /* CONFIG_SLUB_TINY */ 4632 4633 /* Note that interrupts must be enabled when calling this function. */ 4634 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 4635 void **p) 4636 { 4637 int i; 4638 struct obj_cgroup *objcg = NULL; 4639 4640 if (!size) 4641 return 0; 4642 4643 /* memcg and kmem_cache debug support */ 4644 s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags); 4645 if (unlikely(!s)) 4646 return 0; 4647 4648 i = __kmem_cache_alloc_bulk(s, flags, size, p); 4649 4650 /* 4651 * memcg and kmem_cache debug support and memory initialization. 4652 * Done outside of the IRQ disabled fastpath loop. 4653 */ 4654 if (likely(i != 0)) { 4655 slab_post_alloc_hook(s, objcg, flags, size, p, 4656 slab_want_init_on_alloc(flags, s), s->object_size); 4657 } else { 4658 memcg_slab_alloc_error_hook(s, size, objcg); 4659 } 4660 4661 return i; 4662 } 4663 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 4664 4665 4666 /* 4667 * Object placement in a slab is made very easy because we always start at 4668 * offset 0. If we tune the size of the object to the alignment then we can 4669 * get the required alignment by putting one properly sized object after 4670 * another. 4671 * 4672 * Notice that the allocation order determines the sizes of the per cpu 4673 * caches. Each processor has always one slab available for allocations. 4674 * Increasing the allocation order reduces the number of times that slabs 4675 * must be moved on and off the partial lists and is therefore a factor in 4676 * locking overhead. 4677 */ 4678 4679 /* 4680 * Minimum / Maximum order of slab pages. This influences locking overhead 4681 * and slab fragmentation. A higher order reduces the number of partial slabs 4682 * and increases the number of allocations possible without having to 4683 * take the list_lock. 4684 */ 4685 static unsigned int slub_min_order; 4686 static unsigned int slub_max_order = 4687 IS_ENABLED(CONFIG_SLUB_TINY) ? 
1 : PAGE_ALLOC_COSTLY_ORDER; 4688 static unsigned int slub_min_objects; 4689 4690 /* 4691 * Calculate the order of allocation given an slab object size. 4692 * 4693 * The order of allocation has significant impact on performance and other 4694 * system components. Generally order 0 allocations should be preferred since 4695 * order 0 does not cause fragmentation in the page allocator. Larger objects 4696 * be problematic to put into order 0 slabs because there may be too much 4697 * unused space left. We go to a higher order if more than 1/16th of the slab 4698 * would be wasted. 4699 * 4700 * In order to reach satisfactory performance we must ensure that a minimum 4701 * number of objects is in one slab. Otherwise we may generate too much 4702 * activity on the partial lists which requires taking the list_lock. This is 4703 * less a concern for large slabs though which are rarely used. 4704 * 4705 * slub_max_order specifies the order where we begin to stop considering the 4706 * number of objects in a slab as critical. If we reach slub_max_order then 4707 * we try to keep the page order as low as possible. So we accept more waste 4708 * of space in favor of a small page order. 4709 * 4710 * Higher order allocations also allow the placement of more objects in a 4711 * slab and thereby reduce object handling overhead. If the user has 4712 * requested a higher minimum order then we start with that one instead of 4713 * the smallest order which will fit the object. 4714 */ 4715 static inline unsigned int calc_slab_order(unsigned int size, 4716 unsigned int min_order, unsigned int max_order, 4717 unsigned int fract_leftover) 4718 { 4719 unsigned int order; 4720 4721 for (order = min_order; order <= max_order; order++) { 4722 4723 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 4724 unsigned int rem; 4725 4726 rem = slab_size % size; 4727 4728 if (rem <= slab_size / fract_leftover) 4729 break; 4730 } 4731 4732 return order; 4733 } 4734 4735 static inline int calculate_order(unsigned int size) 4736 { 4737 unsigned int order; 4738 unsigned int min_objects; 4739 unsigned int max_objects; 4740 unsigned int min_order; 4741 4742 min_objects = slub_min_objects; 4743 if (!min_objects) { 4744 /* 4745 * Some architectures will only update present cpus when 4746 * onlining them, so don't trust the number if it's just 1. But 4747 * we also don't want to use nr_cpu_ids always, as on some other 4748 * architectures, there can be many possible cpus, but never 4749 * onlined. Here we compromise between trying to avoid too high 4750 * order on systems that appear larger than they are, and too 4751 * low order on systems that appear smaller than they are. 4752 */ 4753 unsigned int nr_cpus = num_present_cpus(); 4754 if (nr_cpus <= 1) 4755 nr_cpus = nr_cpu_ids; 4756 min_objects = 4 * (fls(nr_cpus) + 1); 4757 } 4758 /* min_objects can't be 0 because get_order(0) is undefined */ 4759 max_objects = max(order_objects(slub_max_order, size), 1U); 4760 min_objects = min(min_objects, max_objects); 4761 4762 min_order = max_t(unsigned int, slub_min_order, 4763 get_order(min_objects * size)); 4764 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 4765 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 4766 4767 /* 4768 * Attempt to find best configuration for a slab. This works by first 4769 * attempting to generate a layout with the best possible configuration 4770 * and backing off gradually. 
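 *
 * A worked example (illustrative, assuming 4K pages and no clamping by
 * slub_min_order/slub_max_order): for size == 700, order 0 leaves
 * 4096 % 700 == 596 bytes unused, which is more than 1/16 of the slab,
 * while order 1 leaves only 8192 % 700 == 492 bytes, so the first pass
 * below settles on order 1.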
4771 * 4772 * We start with accepting at most 1/16 waste and try to find the 4773 * smallest order from min_objects-derived/slub_min_order up to 4774 * slub_max_order that will satisfy the constraint. Note that increasing 4775 * the order can only result in same or less fractional waste, not more. 4776 * 4777 * If that fails, we increase the acceptable fraction of waste and try 4778 * again. The last iteration with fraction of 1/2 would effectively 4779 * accept any waste and give us the order determined by min_objects, as 4780 * long as at least single object fits within slub_max_order. 4781 */ 4782 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) { 4783 order = calc_slab_order(size, min_order, slub_max_order, 4784 fraction); 4785 if (order <= slub_max_order) 4786 return order; 4787 } 4788 4789 /* 4790 * Doh this slab cannot be placed using slub_max_order. 4791 */ 4792 order = get_order(size); 4793 if (order <= MAX_PAGE_ORDER) 4794 return order; 4795 return -ENOSYS; 4796 } 4797 4798 static void 4799 init_kmem_cache_node(struct kmem_cache_node *n) 4800 { 4801 n->nr_partial = 0; 4802 spin_lock_init(&n->list_lock); 4803 INIT_LIST_HEAD(&n->partial); 4804 #ifdef CONFIG_SLUB_DEBUG 4805 atomic_long_set(&n->nr_slabs, 0); 4806 atomic_long_set(&n->total_objects, 0); 4807 INIT_LIST_HEAD(&n->full); 4808 #endif 4809 } 4810 4811 #ifndef CONFIG_SLUB_TINY 4812 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 4813 { 4814 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 4815 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * 4816 sizeof(struct kmem_cache_cpu)); 4817 4818 /* 4819 * Must align to double word boundary for the double cmpxchg 4820 * instructions to work; see __pcpu_double_call_return_bool(). 4821 */ 4822 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 4823 2 * sizeof(void *)); 4824 4825 if (!s->cpu_slab) 4826 return 0; 4827 4828 init_kmem_cache_cpus(s); 4829 4830 return 1; 4831 } 4832 #else 4833 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 4834 { 4835 return 1; 4836 } 4837 #endif /* CONFIG_SLUB_TINY */ 4838 4839 static struct kmem_cache *kmem_cache_node; 4840 4841 /* 4842 * No kmalloc_node yet so do it by hand. We know that this is the first 4843 * slab on the node for this slabcache. There are no concurrent accesses 4844 * possible. 4845 * 4846 * Note that this function only works on the kmem_cache_node 4847 * when allocating for the kmem_cache_node. This is used for bootstrapping 4848 * memory on a fresh node that has no slab structures yet. 
4849 */ 4850 static void early_kmem_cache_node_alloc(int node) 4851 { 4852 struct slab *slab; 4853 struct kmem_cache_node *n; 4854 4855 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 4856 4857 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 4858 4859 BUG_ON(!slab); 4860 inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects); 4861 if (slab_nid(slab) != node) { 4862 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 4863 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 4864 } 4865 4866 n = slab->freelist; 4867 BUG_ON(!n); 4868 #ifdef CONFIG_SLUB_DEBUG 4869 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 4870 init_tracking(kmem_cache_node, n); 4871 #endif 4872 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 4873 slab->freelist = get_freepointer(kmem_cache_node, n); 4874 slab->inuse = 1; 4875 kmem_cache_node->node[node] = n; 4876 init_kmem_cache_node(n); 4877 inc_slabs_node(kmem_cache_node, node, slab->objects); 4878 4879 /* 4880 * No locks need to be taken here as it has just been 4881 * initialized and there is no concurrent access. 4882 */ 4883 __add_partial(n, slab, DEACTIVATE_TO_HEAD); 4884 } 4885 4886 static void free_kmem_cache_nodes(struct kmem_cache *s) 4887 { 4888 int node; 4889 struct kmem_cache_node *n; 4890 4891 for_each_kmem_cache_node(s, node, n) { 4892 s->node[node] = NULL; 4893 kmem_cache_free(kmem_cache_node, n); 4894 } 4895 } 4896 4897 void __kmem_cache_release(struct kmem_cache *s) 4898 { 4899 cache_random_seq_destroy(s); 4900 #ifndef CONFIG_SLUB_TINY 4901 free_percpu(s->cpu_slab); 4902 #endif 4903 free_kmem_cache_nodes(s); 4904 } 4905 4906 static int init_kmem_cache_nodes(struct kmem_cache *s) 4907 { 4908 int node; 4909 4910 for_each_node_mask(node, slab_nodes) { 4911 struct kmem_cache_node *n; 4912 4913 if (slab_state == DOWN) { 4914 early_kmem_cache_node_alloc(node); 4915 continue; 4916 } 4917 n = kmem_cache_alloc_node(kmem_cache_node, 4918 GFP_KERNEL, node); 4919 4920 if (!n) { 4921 free_kmem_cache_nodes(s); 4922 return 0; 4923 } 4924 4925 init_kmem_cache_node(n); 4926 s->node[node] = n; 4927 } 4928 return 1; 4929 } 4930 4931 static void set_cpu_partial(struct kmem_cache *s) 4932 { 4933 #ifdef CONFIG_SLUB_CPU_PARTIAL 4934 unsigned int nr_objects; 4935 4936 /* 4937 * cpu_partial determined the maximum number of objects kept in the 4938 * per cpu partial lists of a processor. 4939 * 4940 * Per cpu partial lists mainly contain slabs that just have one 4941 * object freed. If they are used for allocation then they can be 4942 * filled up again with minimal effort. The slab will never hit the 4943 * per node partial lists and therefore no locking will be required. 4944 * 4945 * For backwards compatibility reasons, this is determined as number 4946 * of objects, even though we now limit maximum number of pages, see 4947 * slub_set_cpu_partial() 4948 */ 4949 if (!kmem_cache_has_cpu_partial(s)) 4950 nr_objects = 0; 4951 else if (s->size >= PAGE_SIZE) 4952 nr_objects = 6; 4953 else if (s->size >= 1024) 4954 nr_objects = 24; 4955 else if (s->size >= 256) 4956 nr_objects = 52; 4957 else 4958 nr_objects = 120; 4959 4960 slub_set_cpu_partial(s, nr_objects); 4961 #endif 4962 } 4963 4964 /* 4965 * calculate_sizes() determines the order and the distribution of data within 4966 * a slab object. 
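 *
 * Roughly (a sketch; the exact layout depends on the debug flags and on
 * KASAN), a fully debugged object ends up as:
 *
 *	[ red_left_pad | object_size | right redzone / padding |
 *	  free pointer | alloc/free struct track pair
 *	  (+ original kmalloc size) | padding up to s->align ]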
4967 */ 4968 static int calculate_sizes(struct kmem_cache *s) 4969 { 4970 slab_flags_t flags = s->flags; 4971 unsigned int size = s->object_size; 4972 unsigned int order; 4973 4974 /* 4975 * Round up object size to the next word boundary. We can only 4976 * place the free pointer at word boundaries and this determines 4977 * the possible location of the free pointer. 4978 */ 4979 size = ALIGN(size, sizeof(void *)); 4980 4981 #ifdef CONFIG_SLUB_DEBUG 4982 /* 4983 * Determine if we can poison the object itself. If the user of 4984 * the slab may touch the object after free or before allocation 4985 * then we should never poison the object itself. 4986 */ 4987 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 4988 !s->ctor) 4989 s->flags |= __OBJECT_POISON; 4990 else 4991 s->flags &= ~__OBJECT_POISON; 4992 4993 4994 /* 4995 * If we are Redzoning then check if there is some space between the 4996 * end of the object and the free pointer. If not then add an 4997 * additional word to have some bytes to store Redzone information. 4998 */ 4999 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 5000 size += sizeof(void *); 5001 #endif 5002 5003 /* 5004 * With that we have determined the number of bytes in actual use 5005 * by the object and redzoning. 5006 */ 5007 s->inuse = size; 5008 5009 if (slub_debug_orig_size(s) || 5010 (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 5011 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || 5012 s->ctor) { 5013 /* 5014 * Relocate free pointer after the object if it is not 5015 * permitted to overwrite the first word of the object on 5016 * kmem_cache_free. 5017 * 5018 * This is the case if we do RCU, have a constructor or 5019 * destructor, are poisoning the objects, or are 5020 * redzoning an object smaller than sizeof(void *). 5021 * 5022 * The assumption that s->offset >= s->inuse means free 5023 * pointer is outside of the object is used in the 5024 * freeptr_outside_object() function. If that is no 5025 * longer true, the function needs to be modified. 5026 */ 5027 s->offset = size; 5028 size += sizeof(void *); 5029 } else { 5030 /* 5031 * Store freelist pointer near middle of object to keep 5032 * it away from the edges of the object to avoid small 5033 * sized over/underflows from neighboring allocations. 5034 */ 5035 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 5036 } 5037 5038 #ifdef CONFIG_SLUB_DEBUG 5039 if (flags & SLAB_STORE_USER) { 5040 /* 5041 * Need to store information about allocs and frees after 5042 * the object. 5043 */ 5044 size += 2 * sizeof(struct track); 5045 5046 /* Save the original kmalloc request size */ 5047 if (flags & SLAB_KMALLOC) 5048 size += sizeof(unsigned int); 5049 } 5050 #endif 5051 5052 kasan_cache_create(s, &size, &s->flags); 5053 #ifdef CONFIG_SLUB_DEBUG 5054 if (flags & SLAB_RED_ZONE) { 5055 /* 5056 * Add some empty padding so that we can catch 5057 * overwrites from earlier objects rather than let 5058 * tracking information or the free pointer be 5059 * corrupted if a user writes before the start 5060 * of the object. 5061 */ 5062 size += sizeof(void *); 5063 5064 s->red_left_pad = sizeof(void *); 5065 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 5066 size += s->red_left_pad; 5067 } 5068 #endif 5069 5070 /* 5071 * SLUB stores one object immediately after another beginning from 5072 * offset 0. In order to align the objects we have to simply size 5073 * each object to conform to the alignment. 
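 * For example (illustrative), a computed size of 200 bytes in a cache
 * with s->align == 64 is rounded up to s->size == 256.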
5074 */ 5075 size = ALIGN(size, s->align); 5076 s->size = size; 5077 s->reciprocal_size = reciprocal_value(size); 5078 order = calculate_order(size); 5079 5080 if ((int)order < 0) 5081 return 0; 5082 5083 s->allocflags = 0; 5084 if (order) 5085 s->allocflags |= __GFP_COMP; 5086 5087 if (s->flags & SLAB_CACHE_DMA) 5088 s->allocflags |= GFP_DMA; 5089 5090 if (s->flags & SLAB_CACHE_DMA32) 5091 s->allocflags |= GFP_DMA32; 5092 5093 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5094 s->allocflags |= __GFP_RECLAIMABLE; 5095 5096 /* 5097 * Determine the number of objects per slab 5098 */ 5099 s->oo = oo_make(order, size); 5100 s->min = oo_make(get_order(size), size); 5101 5102 return !!oo_objects(s->oo); 5103 } 5104 5105 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 5106 { 5107 s->flags = kmem_cache_flags(s->size, flags, s->name); 5108 #ifdef CONFIG_SLAB_FREELIST_HARDENED 5109 s->random = get_random_long(); 5110 #endif 5111 5112 if (!calculate_sizes(s)) 5113 goto error; 5114 if (disable_higher_order_debug) { 5115 /* 5116 * Disable debugging flags that store metadata if the min slab 5117 * order increased. 5118 */ 5119 if (get_order(s->size) > get_order(s->object_size)) { 5120 s->flags &= ~DEBUG_METADATA_FLAGS; 5121 s->offset = 0; 5122 if (!calculate_sizes(s)) 5123 goto error; 5124 } 5125 } 5126 5127 #ifdef system_has_freelist_aba 5128 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { 5129 /* Enable fast mode */ 5130 s->flags |= __CMPXCHG_DOUBLE; 5131 } 5132 #endif 5133 5134 /* 5135 * The larger the object size is, the more slabs we want on the partial 5136 * list to avoid pounding the page allocator excessively. 5137 */ 5138 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 5139 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 5140 5141 set_cpu_partial(s); 5142 5143 #ifdef CONFIG_NUMA 5144 s->remote_node_defrag_ratio = 1000; 5145 #endif 5146 5147 /* Initialize the pre-computed randomized freelist if slab is up */ 5148 if (slab_state >= UP) { 5149 if (init_cache_random_seq(s)) 5150 goto error; 5151 } 5152 5153 if (!init_kmem_cache_nodes(s)) 5154 goto error; 5155 5156 if (alloc_kmem_cache_cpus(s)) 5157 return 0; 5158 5159 error: 5160 __kmem_cache_release(s); 5161 return -EINVAL; 5162 } 5163 5164 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, 5165 const char *text) 5166 { 5167 #ifdef CONFIG_SLUB_DEBUG 5168 void *addr = slab_address(slab); 5169 void *p; 5170 5171 slab_err(s, slab, text, s->name); 5172 5173 spin_lock(&object_map_lock); 5174 __fill_map(object_map, s, slab); 5175 5176 for_each_object(p, s, addr, slab->objects) { 5177 5178 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { 5179 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 5180 print_tracking(s, p); 5181 } 5182 } 5183 spin_unlock(&object_map_lock); 5184 #endif 5185 } 5186 5187 /* 5188 * Attempt to free all partial slabs on a node. 5189 * This is called from __kmem_cache_shutdown(). We must take list_lock 5190 * because sysfs file might still access partial list after the shutdowning. 
5191 */ 5192 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 5193 { 5194 LIST_HEAD(discard); 5195 struct slab *slab, *h; 5196 5197 BUG_ON(irqs_disabled()); 5198 spin_lock_irq(&n->list_lock); 5199 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 5200 if (!slab->inuse) { 5201 remove_partial(n, slab); 5202 list_add(&slab->slab_list, &discard); 5203 } else { 5204 list_slab_objects(s, slab, 5205 "Objects remaining in %s on __kmem_cache_shutdown()"); 5206 } 5207 } 5208 spin_unlock_irq(&n->list_lock); 5209 5210 list_for_each_entry_safe(slab, h, &discard, slab_list) 5211 discard_slab(s, slab); 5212 } 5213 5214 bool __kmem_cache_empty(struct kmem_cache *s) 5215 { 5216 int node; 5217 struct kmem_cache_node *n; 5218 5219 for_each_kmem_cache_node(s, node, n) 5220 if (n->nr_partial || node_nr_slabs(n)) 5221 return false; 5222 return true; 5223 } 5224 5225 /* 5226 * Release all resources used by a slab cache. 5227 */ 5228 int __kmem_cache_shutdown(struct kmem_cache *s) 5229 { 5230 int node; 5231 struct kmem_cache_node *n; 5232 5233 flush_all_cpus_locked(s); 5234 /* Attempt to free all objects */ 5235 for_each_kmem_cache_node(s, node, n) { 5236 free_partial(s, n); 5237 if (n->nr_partial || node_nr_slabs(n)) 5238 return 1; 5239 } 5240 return 0; 5241 } 5242 5243 #ifdef CONFIG_PRINTK 5244 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 5245 { 5246 void *base; 5247 int __maybe_unused i; 5248 unsigned int objnr; 5249 void *objp; 5250 void *objp0; 5251 struct kmem_cache *s = slab->slab_cache; 5252 struct track __maybe_unused *trackp; 5253 5254 kpp->kp_ptr = object; 5255 kpp->kp_slab = slab; 5256 kpp->kp_slab_cache = s; 5257 base = slab_address(slab); 5258 objp0 = kasan_reset_tag(object); 5259 #ifdef CONFIG_SLUB_DEBUG 5260 objp = restore_red_left(s, objp0); 5261 #else 5262 objp = objp0; 5263 #endif 5264 objnr = obj_to_index(s, slab, objp); 5265 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 5266 objp = base + s->size * objnr; 5267 kpp->kp_objp = objp; 5268 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 5269 || (objp - base) % s->size) || 5270 !(s->flags & SLAB_STORE_USER)) 5271 return; 5272 #ifdef CONFIG_SLUB_DEBUG 5273 objp = fixup_red_left(s, objp); 5274 trackp = get_track(s, objp, TRACK_ALLOC); 5275 kpp->kp_ret = (void *)trackp->addr; 5276 #ifdef CONFIG_STACKDEPOT 5277 { 5278 depot_stack_handle_t handle; 5279 unsigned long *entries; 5280 unsigned int nr_entries; 5281 5282 handle = READ_ONCE(trackp->handle); 5283 if (handle) { 5284 nr_entries = stack_depot_fetch(handle, &entries); 5285 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5286 kpp->kp_stack[i] = (void *)entries[i]; 5287 } 5288 5289 trackp = get_track(s, objp, TRACK_FREE); 5290 handle = READ_ONCE(trackp->handle); 5291 if (handle) { 5292 nr_entries = stack_depot_fetch(handle, &entries); 5293 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5294 kpp->kp_free_stack[i] = (void *)entries[i]; 5295 } 5296 } 5297 #endif 5298 #endif 5299 } 5300 #endif 5301 5302 /******************************************************************** 5303 * Kmalloc subsystem 5304 *******************************************************************/ 5305 5306 static int __init setup_slub_min_order(char *str) 5307 { 5308 get_option(&str, (int *)&slub_min_order); 5309 5310 if (slub_min_order > slub_max_order) 5311 slub_max_order = slub_min_order; 5312 5313 return 1; 5314 } 5315 5316 __setup("slub_min_order=", setup_slub_min_order); 5317 5318 static int 
__init setup_slub_max_order(char *str) 5319 { 5320 get_option(&str, (int *)&slub_max_order); 5321 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER); 5322 5323 if (slub_min_order > slub_max_order) 5324 slub_min_order = slub_max_order; 5325 5326 return 1; 5327 } 5328 5329 __setup("slub_max_order=", setup_slub_max_order); 5330 5331 static int __init setup_slub_min_objects(char *str) 5332 { 5333 get_option(&str, (int *)&slub_min_objects); 5334 5335 return 1; 5336 } 5337 5338 __setup("slub_min_objects=", setup_slub_min_objects); 5339 5340 #ifdef CONFIG_HARDENED_USERCOPY 5341 /* 5342 * Rejects incorrectly sized objects and objects that are to be copied 5343 * to/from userspace but do not fall entirely within the containing slab 5344 * cache's usercopy region. 5345 * 5346 * Returns NULL if check passes, otherwise const char * to name of cache 5347 * to indicate an error. 5348 */ 5349 void __check_heap_object(const void *ptr, unsigned long n, 5350 const struct slab *slab, bool to_user) 5351 { 5352 struct kmem_cache *s; 5353 unsigned int offset; 5354 bool is_kfence = is_kfence_address(ptr); 5355 5356 ptr = kasan_reset_tag(ptr); 5357 5358 /* Find object and usable object size. */ 5359 s = slab->slab_cache; 5360 5361 /* Reject impossible pointers. */ 5362 if (ptr < slab_address(slab)) 5363 usercopy_abort("SLUB object not in SLUB page?!", NULL, 5364 to_user, 0, n); 5365 5366 /* Find offset within object. */ 5367 if (is_kfence) 5368 offset = ptr - kfence_object_start(ptr); 5369 else 5370 offset = (ptr - slab_address(slab)) % s->size; 5371 5372 /* Adjust for redzone and reject if within the redzone. */ 5373 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 5374 if (offset < s->red_left_pad) 5375 usercopy_abort("SLUB object in left red zone", 5376 s->name, to_user, offset, n); 5377 offset -= s->red_left_pad; 5378 } 5379 5380 /* Allow address range falling entirely within usercopy region. */ 5381 if (offset >= s->useroffset && 5382 offset - s->useroffset <= s->usersize && 5383 n <= s->useroffset - offset + s->usersize) 5384 return; 5385 5386 usercopy_abort("SLUB object", s->name, to_user, offset, n); 5387 } 5388 #endif /* CONFIG_HARDENED_USERCOPY */ 5389 5390 #define SHRINK_PROMOTE_MAX 32 5391 5392 /* 5393 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 5394 * up most to the head of the partial lists. New allocations will then 5395 * fill those up and thus they can be removed from the partial lists. 5396 * 5397 * The slabs with the least items are placed last. This results in them 5398 * being allocated from last increasing the chance that the last objects 5399 * are freed in them. 5400 */ 5401 static int __kmem_cache_do_shrink(struct kmem_cache *s) 5402 { 5403 int node; 5404 int i; 5405 struct kmem_cache_node *n; 5406 struct slab *slab; 5407 struct slab *t; 5408 struct list_head discard; 5409 struct list_head promote[SHRINK_PROMOTE_MAX]; 5410 unsigned long flags; 5411 int ret = 0; 5412 5413 for_each_kmem_cache_node(s, node, n) { 5414 INIT_LIST_HEAD(&discard); 5415 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 5416 INIT_LIST_HEAD(promote + i); 5417 5418 spin_lock_irqsave(&n->list_lock, flags); 5419 5420 /* 5421 * Build lists of slabs to discard or promote. 5422 * 5423 * Note that concurrent frees may occur while we hold the 5424 * list_lock. slab->inuse here is the upper limit. 
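 *
 * For example (illustrative): a slab with slab->objects == 32 and
 * slab->inuse == 30 has free == 2 and is moved to promote[1], while a
 * completely free slab (free == 32) goes to the discard list instead.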
5425 */ 5426 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 5427 int free = slab->objects - slab->inuse; 5428 5429 /* Do not reread slab->inuse */ 5430 barrier(); 5431 5432 /* We do not keep full slabs on the list */ 5433 BUG_ON(free <= 0); 5434 5435 if (free == slab->objects) { 5436 list_move(&slab->slab_list, &discard); 5437 slab_clear_node_partial(slab); 5438 n->nr_partial--; 5439 dec_slabs_node(s, node, slab->objects); 5440 } else if (free <= SHRINK_PROMOTE_MAX) 5441 list_move(&slab->slab_list, promote + free - 1); 5442 } 5443 5444 /* 5445 * Promote the slabs filled up most to the head of the 5446 * partial list. 5447 */ 5448 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 5449 list_splice(promote + i, &n->partial); 5450 5451 spin_unlock_irqrestore(&n->list_lock, flags); 5452 5453 /* Release empty slabs */ 5454 list_for_each_entry_safe(slab, t, &discard, slab_list) 5455 free_slab(s, slab); 5456 5457 if (node_nr_slabs(n)) 5458 ret = 1; 5459 } 5460 5461 return ret; 5462 } 5463 5464 int __kmem_cache_shrink(struct kmem_cache *s) 5465 { 5466 flush_all(s); 5467 return __kmem_cache_do_shrink(s); 5468 } 5469 5470 static int slab_mem_going_offline_callback(void *arg) 5471 { 5472 struct kmem_cache *s; 5473 5474 mutex_lock(&slab_mutex); 5475 list_for_each_entry(s, &slab_caches, list) { 5476 flush_all_cpus_locked(s); 5477 __kmem_cache_do_shrink(s); 5478 } 5479 mutex_unlock(&slab_mutex); 5480 5481 return 0; 5482 } 5483 5484 static void slab_mem_offline_callback(void *arg) 5485 { 5486 struct memory_notify *marg = arg; 5487 int offline_node; 5488 5489 offline_node = marg->status_change_nid_normal; 5490 5491 /* 5492 * If the node still has available memory. we need kmem_cache_node 5493 * for it yet. 5494 */ 5495 if (offline_node < 0) 5496 return; 5497 5498 mutex_lock(&slab_mutex); 5499 node_clear(offline_node, slab_nodes); 5500 /* 5501 * We no longer free kmem_cache_node structures here, as it would be 5502 * racy with all get_node() users, and infeasible to protect them with 5503 * slab_mutex. 5504 */ 5505 mutex_unlock(&slab_mutex); 5506 } 5507 5508 static int slab_mem_going_online_callback(void *arg) 5509 { 5510 struct kmem_cache_node *n; 5511 struct kmem_cache *s; 5512 struct memory_notify *marg = arg; 5513 int nid = marg->status_change_nid_normal; 5514 int ret = 0; 5515 5516 /* 5517 * If the node's memory is already available, then kmem_cache_node is 5518 * already created. Nothing to do. 5519 */ 5520 if (nid < 0) 5521 return 0; 5522 5523 /* 5524 * We are bringing a node online. No memory is available yet. We must 5525 * allocate a kmem_cache_node structure in order to bring the node 5526 * online. 5527 */ 5528 mutex_lock(&slab_mutex); 5529 list_for_each_entry(s, &slab_caches, list) { 5530 /* 5531 * The structure may already exist if the node was previously 5532 * onlined and offlined. 5533 */ 5534 if (get_node(s, nid)) 5535 continue; 5536 /* 5537 * XXX: kmem_cache_alloc_node will fallback to other nodes 5538 * since memory is not yet available from the node that 5539 * is brought up. 5540 */ 5541 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 5542 if (!n) { 5543 ret = -ENOMEM; 5544 goto out; 5545 } 5546 init_kmem_cache_node(n); 5547 s->node[nid] = n; 5548 } 5549 /* 5550 * Any cache created after this point will also have kmem_cache_node 5551 * initialized for the new node. 
5552 */ 5553 node_set(nid, slab_nodes); 5554 out: 5555 mutex_unlock(&slab_mutex); 5556 return ret; 5557 } 5558 5559 static int slab_memory_callback(struct notifier_block *self, 5560 unsigned long action, void *arg) 5561 { 5562 int ret = 0; 5563 5564 switch (action) { 5565 case MEM_GOING_ONLINE: 5566 ret = slab_mem_going_online_callback(arg); 5567 break; 5568 case MEM_GOING_OFFLINE: 5569 ret = slab_mem_going_offline_callback(arg); 5570 break; 5571 case MEM_OFFLINE: 5572 case MEM_CANCEL_ONLINE: 5573 slab_mem_offline_callback(arg); 5574 break; 5575 case MEM_ONLINE: 5576 case MEM_CANCEL_OFFLINE: 5577 break; 5578 } 5579 if (ret) 5580 ret = notifier_from_errno(ret); 5581 else 5582 ret = NOTIFY_OK; 5583 return ret; 5584 } 5585 5586 /******************************************************************** 5587 * Basic setup of slabs 5588 *******************************************************************/ 5589 5590 /* 5591 * Used for early kmem_cache structures that were allocated using 5592 * the page allocator. Allocate them properly then fix up the pointers 5593 * that may be pointing to the wrong kmem_cache structure. 5594 */ 5595 5596 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 5597 { 5598 int node; 5599 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 5600 struct kmem_cache_node *n; 5601 5602 memcpy(s, static_cache, kmem_cache->object_size); 5603 5604 /* 5605 * This runs very early, and only the boot processor is supposed to be 5606 * up. Even if it weren't true, IRQs are not up so we couldn't fire 5607 * IPIs around. 5608 */ 5609 __flush_cpu_slab(s, smp_processor_id()); 5610 for_each_kmem_cache_node(s, node, n) { 5611 struct slab *p; 5612 5613 list_for_each_entry(p, &n->partial, slab_list) 5614 p->slab_cache = s; 5615 5616 #ifdef CONFIG_SLUB_DEBUG 5617 list_for_each_entry(p, &n->full, slab_list) 5618 p->slab_cache = s; 5619 #endif 5620 } 5621 list_add(&s->list, &slab_caches); 5622 return s; 5623 } 5624 5625 void __init kmem_cache_init(void) 5626 { 5627 static __initdata struct kmem_cache boot_kmem_cache, 5628 boot_kmem_cache_node; 5629 int node; 5630 5631 if (debug_guardpage_minorder()) 5632 slub_max_order = 0; 5633 5634 /* Print slub debugging pointers without hashing */ 5635 if (__slub_debug_enabled()) 5636 no_hash_pointers_enable(NULL); 5637 5638 kmem_cache_node = &boot_kmem_cache_node; 5639 kmem_cache = &boot_kmem_cache; 5640 5641 /* 5642 * Initialize the nodemask for which we will allocate per node 5643 * structures. Here we don't need taking slab_mutex yet. 
5644 */ 5645 for_each_node_state(node, N_NORMAL_MEMORY) 5646 node_set(node, slab_nodes); 5647 5648 create_boot_cache(kmem_cache_node, "kmem_cache_node", 5649 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); 5650 5651 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 5652 5653 /* Able to allocate the per node structures */ 5654 slab_state = PARTIAL; 5655 5656 create_boot_cache(kmem_cache, "kmem_cache", 5657 offsetof(struct kmem_cache, node) + 5658 nr_node_ids * sizeof(struct kmem_cache_node *), 5659 SLAB_HWCACHE_ALIGN, 0, 0); 5660 5661 kmem_cache = bootstrap(&boot_kmem_cache); 5662 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 5663 5664 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 5665 setup_kmalloc_cache_index_table(); 5666 create_kmalloc_caches(0); 5667 5668 /* Setup random freelists for each cache */ 5669 init_freelist_randomization(); 5670 5671 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 5672 slub_cpu_dead); 5673 5674 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 5675 cache_line_size(), 5676 slub_min_order, slub_max_order, slub_min_objects, 5677 nr_cpu_ids, nr_node_ids); 5678 } 5679 5680 void __init kmem_cache_init_late(void) 5681 { 5682 #ifndef CONFIG_SLUB_TINY 5683 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); 5684 WARN_ON(!flushwq); 5685 #endif 5686 } 5687 5688 struct kmem_cache * 5689 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 5690 slab_flags_t flags, void (*ctor)(void *)) 5691 { 5692 struct kmem_cache *s; 5693 5694 s = find_mergeable(size, align, flags, name, ctor); 5695 if (s) { 5696 if (sysfs_slab_alias(s, name)) 5697 return NULL; 5698 5699 s->refcount++; 5700 5701 /* 5702 * Adjust the object sizes so that we clear 5703 * the complete object on kzalloc. 5704 */ 5705 s->object_size = max(s->object_size, size); 5706 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 5707 } 5708 5709 return s; 5710 } 5711 5712 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 5713 { 5714 int err; 5715 5716 err = kmem_cache_open(s, flags); 5717 if (err) 5718 return err; 5719 5720 /* Mutex is not taken during early boot */ 5721 if (slab_state <= UP) 5722 return 0; 5723 5724 err = sysfs_slab_add(s); 5725 if (err) { 5726 __kmem_cache_release(s); 5727 return err; 5728 } 5729 5730 if (s->flags & SLAB_STORE_USER) 5731 debugfs_slab_add(s); 5732 5733 return 0; 5734 } 5735 5736 #ifdef SLAB_SUPPORTS_SYSFS 5737 static int count_inuse(struct slab *slab) 5738 { 5739 return slab->inuse; 5740 } 5741 5742 static int count_total(struct slab *slab) 5743 { 5744 return slab->objects; 5745 } 5746 #endif 5747 5748 #ifdef CONFIG_SLUB_DEBUG 5749 static void validate_slab(struct kmem_cache *s, struct slab *slab, 5750 unsigned long *obj_map) 5751 { 5752 void *p; 5753 void *addr = slab_address(slab); 5754 5755 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 5756 return; 5757 5758 /* Now we know that a valid freelist exists */ 5759 __fill_map(obj_map, s, slab); 5760 for_each_object(p, s, addr, slab->objects) { 5761 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 
5762 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 5763 5764 if (!check_object(s, slab, p, val)) 5765 break; 5766 } 5767 } 5768 5769 static int validate_slab_node(struct kmem_cache *s, 5770 struct kmem_cache_node *n, unsigned long *obj_map) 5771 { 5772 unsigned long count = 0; 5773 struct slab *slab; 5774 unsigned long flags; 5775 5776 spin_lock_irqsave(&n->list_lock, flags); 5777 5778 list_for_each_entry(slab, &n->partial, slab_list) { 5779 validate_slab(s, slab, obj_map); 5780 count++; 5781 } 5782 if (count != n->nr_partial) { 5783 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 5784 s->name, count, n->nr_partial); 5785 slab_add_kunit_errors(); 5786 } 5787 5788 if (!(s->flags & SLAB_STORE_USER)) 5789 goto out; 5790 5791 list_for_each_entry(slab, &n->full, slab_list) { 5792 validate_slab(s, slab, obj_map); 5793 count++; 5794 } 5795 if (count != node_nr_slabs(n)) { 5796 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 5797 s->name, count, node_nr_slabs(n)); 5798 slab_add_kunit_errors(); 5799 } 5800 5801 out: 5802 spin_unlock_irqrestore(&n->list_lock, flags); 5803 return count; 5804 } 5805 5806 long validate_slab_cache(struct kmem_cache *s) 5807 { 5808 int node; 5809 unsigned long count = 0; 5810 struct kmem_cache_node *n; 5811 unsigned long *obj_map; 5812 5813 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 5814 if (!obj_map) 5815 return -ENOMEM; 5816 5817 flush_all(s); 5818 for_each_kmem_cache_node(s, node, n) 5819 count += validate_slab_node(s, n, obj_map); 5820 5821 bitmap_free(obj_map); 5822 5823 return count; 5824 } 5825 EXPORT_SYMBOL(validate_slab_cache); 5826 5827 #ifdef CONFIG_DEBUG_FS 5828 /* 5829 * Generate lists of code addresses where slabcache objects are allocated 5830 * and freed. 5831 */ 5832 5833 struct location { 5834 depot_stack_handle_t handle; 5835 unsigned long count; 5836 unsigned long addr; 5837 unsigned long waste; 5838 long long sum_time; 5839 long min_time; 5840 long max_time; 5841 long min_pid; 5842 long max_pid; 5843 DECLARE_BITMAP(cpus, NR_CPUS); 5844 nodemask_t nodes; 5845 }; 5846 5847 struct loc_track { 5848 unsigned long max; 5849 unsigned long count; 5850 struct location *loc; 5851 loff_t idx; 5852 }; 5853 5854 static struct dentry *slab_debugfs_root; 5855 5856 static void free_loc_track(struct loc_track *t) 5857 { 5858 if (t->max) 5859 free_pages((unsigned long)t->loc, 5860 get_order(sizeof(struct location) * t->max)); 5861 } 5862 5863 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 5864 { 5865 struct location *l; 5866 int order; 5867 5868 order = get_order(sizeof(struct location) * max); 5869 5870 l = (void *)__get_free_pages(flags, order); 5871 if (!l) 5872 return 0; 5873 5874 if (t->count) { 5875 memcpy(l, t->loc, sizeof(struct location) * t->count); 5876 free_loc_track(t); 5877 } 5878 t->max = max; 5879 t->loc = l; 5880 return 1; 5881 } 5882 5883 static int add_location(struct loc_track *t, struct kmem_cache *s, 5884 const struct track *track, 5885 unsigned int orig_size) 5886 { 5887 long start, end, pos; 5888 struct location *l; 5889 unsigned long caddr, chandle, cwaste; 5890 unsigned long age = jiffies - track->when; 5891 depot_stack_handle_t handle = 0; 5892 unsigned int waste = s->object_size - orig_size; 5893 5894 #ifdef CONFIG_STACKDEPOT 5895 handle = READ_ONCE(track->handle); 5896 #endif 5897 start = -1; 5898 end = t->count; 5899 5900 for ( ; ; ) { 5901 pos = start + (end - start + 1) / 2; 5902 5903 /* 5904 * There is nothing at "end". 
If we end up there 5905 * we need to add something to before end. 5906 */ 5907 if (pos == end) 5908 break; 5909 5910 l = &t->loc[pos]; 5911 caddr = l->addr; 5912 chandle = l->handle; 5913 cwaste = l->waste; 5914 if ((track->addr == caddr) && (handle == chandle) && 5915 (waste == cwaste)) { 5916 5917 l->count++; 5918 if (track->when) { 5919 l->sum_time += age; 5920 if (age < l->min_time) 5921 l->min_time = age; 5922 if (age > l->max_time) 5923 l->max_time = age; 5924 5925 if (track->pid < l->min_pid) 5926 l->min_pid = track->pid; 5927 if (track->pid > l->max_pid) 5928 l->max_pid = track->pid; 5929 5930 cpumask_set_cpu(track->cpu, 5931 to_cpumask(l->cpus)); 5932 } 5933 node_set(page_to_nid(virt_to_page(track)), l->nodes); 5934 return 1; 5935 } 5936 5937 if (track->addr < caddr) 5938 end = pos; 5939 else if (track->addr == caddr && handle < chandle) 5940 end = pos; 5941 else if (track->addr == caddr && handle == chandle && 5942 waste < cwaste) 5943 end = pos; 5944 else 5945 start = pos; 5946 } 5947 5948 /* 5949 * Not found. Insert new tracking element. 5950 */ 5951 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 5952 return 0; 5953 5954 l = t->loc + pos; 5955 if (pos < t->count) 5956 memmove(l + 1, l, 5957 (t->count - pos) * sizeof(struct location)); 5958 t->count++; 5959 l->count = 1; 5960 l->addr = track->addr; 5961 l->sum_time = age; 5962 l->min_time = age; 5963 l->max_time = age; 5964 l->min_pid = track->pid; 5965 l->max_pid = track->pid; 5966 l->handle = handle; 5967 l->waste = waste; 5968 cpumask_clear(to_cpumask(l->cpus)); 5969 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 5970 nodes_clear(l->nodes); 5971 node_set(page_to_nid(virt_to_page(track)), l->nodes); 5972 return 1; 5973 } 5974 5975 static void process_slab(struct loc_track *t, struct kmem_cache *s, 5976 struct slab *slab, enum track_item alloc, 5977 unsigned long *obj_map) 5978 { 5979 void *addr = slab_address(slab); 5980 bool is_alloc = (alloc == TRACK_ALLOC); 5981 void *p; 5982 5983 __fill_map(obj_map, s, slab); 5984 5985 for_each_object(p, s, addr, slab->objects) 5986 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 5987 add_location(t, s, get_track(s, p, alloc), 5988 is_alloc ? 
get_orig_size(s, p) : 5989 s->object_size); 5990 } 5991 #endif /* CONFIG_DEBUG_FS */ 5992 #endif /* CONFIG_SLUB_DEBUG */ 5993 5994 #ifdef SLAB_SUPPORTS_SYSFS 5995 enum slab_stat_type { 5996 SL_ALL, /* All slabs */ 5997 SL_PARTIAL, /* Only partially allocated slabs */ 5998 SL_CPU, /* Only slabs used for cpu caches */ 5999 SL_OBJECTS, /* Determine allocated objects not slabs */ 6000 SL_TOTAL /* Determine object capacity not slabs */ 6001 }; 6002 6003 #define SO_ALL (1 << SL_ALL) 6004 #define SO_PARTIAL (1 << SL_PARTIAL) 6005 #define SO_CPU (1 << SL_CPU) 6006 #define SO_OBJECTS (1 << SL_OBJECTS) 6007 #define SO_TOTAL (1 << SL_TOTAL) 6008 6009 static ssize_t show_slab_objects(struct kmem_cache *s, 6010 char *buf, unsigned long flags) 6011 { 6012 unsigned long total = 0; 6013 int node; 6014 int x; 6015 unsigned long *nodes; 6016 int len = 0; 6017 6018 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 6019 if (!nodes) 6020 return -ENOMEM; 6021 6022 if (flags & SO_CPU) { 6023 int cpu; 6024 6025 for_each_possible_cpu(cpu) { 6026 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 6027 cpu); 6028 int node; 6029 struct slab *slab; 6030 6031 slab = READ_ONCE(c->slab); 6032 if (!slab) 6033 continue; 6034 6035 node = slab_nid(slab); 6036 if (flags & SO_TOTAL) 6037 x = slab->objects; 6038 else if (flags & SO_OBJECTS) 6039 x = slab->inuse; 6040 else 6041 x = 1; 6042 6043 total += x; 6044 nodes[node] += x; 6045 6046 #ifdef CONFIG_SLUB_CPU_PARTIAL 6047 slab = slub_percpu_partial_read_once(c); 6048 if (slab) { 6049 node = slab_nid(slab); 6050 if (flags & SO_TOTAL) 6051 WARN_ON_ONCE(1); 6052 else if (flags & SO_OBJECTS) 6053 WARN_ON_ONCE(1); 6054 else 6055 x = slab->slabs; 6056 total += x; 6057 nodes[node] += x; 6058 } 6059 #endif 6060 } 6061 } 6062 6063 /* 6064 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 6065 * already held which will conflict with an existing lock order: 6066 * 6067 * mem_hotplug_lock->slab_mutex->kernfs_mutex 6068 * 6069 * We don't really need mem_hotplug_lock (to hold off 6070 * slab_mem_going_offline_callback) here because slab's memory hot 6071 * unplug code doesn't destroy the kmem_cache->node[] data. 
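 *
 * Without that lock the per-node counts gathered below are only a
 * best-effort snapshot: concurrent allocation, freeing or hot-unplug
 * can make them slightly stale, which is acceptable for sysfs output.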
6072 */ 6073 6074 #ifdef CONFIG_SLUB_DEBUG 6075 if (flags & SO_ALL) { 6076 struct kmem_cache_node *n; 6077 6078 for_each_kmem_cache_node(s, node, n) { 6079 6080 if (flags & SO_TOTAL) 6081 x = node_nr_objs(n); 6082 else if (flags & SO_OBJECTS) 6083 x = node_nr_objs(n) - count_partial(n, count_free); 6084 else 6085 x = node_nr_slabs(n); 6086 total += x; 6087 nodes[node] += x; 6088 } 6089 6090 } else 6091 #endif 6092 if (flags & SO_PARTIAL) { 6093 struct kmem_cache_node *n; 6094 6095 for_each_kmem_cache_node(s, node, n) { 6096 if (flags & SO_TOTAL) 6097 x = count_partial(n, count_total); 6098 else if (flags & SO_OBJECTS) 6099 x = count_partial(n, count_inuse); 6100 else 6101 x = n->nr_partial; 6102 total += x; 6103 nodes[node] += x; 6104 } 6105 } 6106 6107 len += sysfs_emit_at(buf, len, "%lu", total); 6108 #ifdef CONFIG_NUMA 6109 for (node = 0; node < nr_node_ids; node++) { 6110 if (nodes[node]) 6111 len += sysfs_emit_at(buf, len, " N%d=%lu", 6112 node, nodes[node]); 6113 } 6114 #endif 6115 len += sysfs_emit_at(buf, len, "\n"); 6116 kfree(nodes); 6117 6118 return len; 6119 } 6120 6121 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 6122 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 6123 6124 struct slab_attribute { 6125 struct attribute attr; 6126 ssize_t (*show)(struct kmem_cache *s, char *buf); 6127 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 6128 }; 6129 6130 #define SLAB_ATTR_RO(_name) \ 6131 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 6132 6133 #define SLAB_ATTR(_name) \ 6134 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 6135 6136 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 6137 { 6138 return sysfs_emit(buf, "%u\n", s->size); 6139 } 6140 SLAB_ATTR_RO(slab_size); 6141 6142 static ssize_t align_show(struct kmem_cache *s, char *buf) 6143 { 6144 return sysfs_emit(buf, "%u\n", s->align); 6145 } 6146 SLAB_ATTR_RO(align); 6147 6148 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 6149 { 6150 return sysfs_emit(buf, "%u\n", s->object_size); 6151 } 6152 SLAB_ATTR_RO(object_size); 6153 6154 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 6155 { 6156 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 6157 } 6158 SLAB_ATTR_RO(objs_per_slab); 6159 6160 static ssize_t order_show(struct kmem_cache *s, char *buf) 6161 { 6162 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 6163 } 6164 SLAB_ATTR_RO(order); 6165 6166 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 6167 { 6168 return sysfs_emit(buf, "%lu\n", s->min_partial); 6169 } 6170 6171 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 6172 size_t length) 6173 { 6174 unsigned long min; 6175 int err; 6176 6177 err = kstrtoul(buf, 10, &min); 6178 if (err) 6179 return err; 6180 6181 s->min_partial = min; 6182 return length; 6183 } 6184 SLAB_ATTR(min_partial); 6185 6186 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 6187 { 6188 unsigned int nr_partial = 0; 6189 #ifdef CONFIG_SLUB_CPU_PARTIAL 6190 nr_partial = s->cpu_partial; 6191 #endif 6192 6193 return sysfs_emit(buf, "%u\n", nr_partial); 6194 } 6195 6196 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 6197 size_t length) 6198 { 6199 unsigned int objects; 6200 int err; 6201 6202 err = kstrtouint(buf, 10, &objects); 6203 if (err) 6204 return err; 6205 if (objects && !kmem_cache_has_cpu_partial(s)) 6206 return -EINVAL; 6207 6208 slub_set_cpu_partial(s, objects); 
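/*
 * Flush all per-cpu slabs and per-cpu partial lists so that slabs cached
 * under the old limit are released and the new limit takes effect
 * immediately.
 */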
6209 flush_all(s); 6210 return length; 6211 } 6212 SLAB_ATTR(cpu_partial); 6213 6214 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 6215 { 6216 if (!s->ctor) 6217 return 0; 6218 return sysfs_emit(buf, "%pS\n", s->ctor); 6219 } 6220 SLAB_ATTR_RO(ctor); 6221 6222 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 6223 { 6224 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 6225 } 6226 SLAB_ATTR_RO(aliases); 6227 6228 static ssize_t partial_show(struct kmem_cache *s, char *buf) 6229 { 6230 return show_slab_objects(s, buf, SO_PARTIAL); 6231 } 6232 SLAB_ATTR_RO(partial); 6233 6234 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 6235 { 6236 return show_slab_objects(s, buf, SO_CPU); 6237 } 6238 SLAB_ATTR_RO(cpu_slabs); 6239 6240 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 6241 { 6242 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 6243 } 6244 SLAB_ATTR_RO(objects_partial); 6245 6246 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 6247 { 6248 int objects = 0; 6249 int slabs = 0; 6250 int cpu __maybe_unused; 6251 int len = 0; 6252 6253 #ifdef CONFIG_SLUB_CPU_PARTIAL 6254 for_each_online_cpu(cpu) { 6255 struct slab *slab; 6256 6257 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6258 6259 if (slab) 6260 slabs += slab->slabs; 6261 } 6262 #endif 6263 6264 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 6265 objects = (slabs * oo_objects(s->oo)) / 2; 6266 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 6267 6268 #ifdef CONFIG_SLUB_CPU_PARTIAL 6269 for_each_online_cpu(cpu) { 6270 struct slab *slab; 6271 6272 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6273 if (slab) { 6274 slabs = READ_ONCE(slab->slabs); 6275 objects = (slabs * oo_objects(s->oo)) / 2; 6276 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 6277 cpu, objects, slabs); 6278 } 6279 } 6280 #endif 6281 len += sysfs_emit_at(buf, len, "\n"); 6282 6283 return len; 6284 } 6285 SLAB_ATTR_RO(slabs_cpu_partial); 6286 6287 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 6288 { 6289 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 6290 } 6291 SLAB_ATTR_RO(reclaim_account); 6292 6293 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 6294 { 6295 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 6296 } 6297 SLAB_ATTR_RO(hwcache_align); 6298 6299 #ifdef CONFIG_ZONE_DMA 6300 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 6301 { 6302 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 6303 } 6304 SLAB_ATTR_RO(cache_dma); 6305 #endif 6306 6307 #ifdef CONFIG_HARDENED_USERCOPY 6308 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 6309 { 6310 return sysfs_emit(buf, "%u\n", s->usersize); 6311 } 6312 SLAB_ATTR_RO(usersize); 6313 #endif 6314 6315 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 6316 { 6317 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 6318 } 6319 SLAB_ATTR_RO(destroy_by_rcu); 6320 6321 #ifdef CONFIG_SLUB_DEBUG 6322 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 6323 { 6324 return show_slab_objects(s, buf, SO_ALL); 6325 } 6326 SLAB_ATTR_RO(slabs); 6327 6328 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 6329 { 6330 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 6331 } 6332 SLAB_ATTR_RO(total_objects); 6333 6334 static ssize_t objects_show(struct kmem_cache *s, char *buf) 6335 { 6336 return 
show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 6337 } 6338 SLAB_ATTR_RO(objects); 6339 6340 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 6341 { 6342 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 6343 } 6344 SLAB_ATTR_RO(sanity_checks); 6345 6346 static ssize_t trace_show(struct kmem_cache *s, char *buf) 6347 { 6348 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 6349 } 6350 SLAB_ATTR_RO(trace); 6351 6352 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 6353 { 6354 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 6355 } 6356 6357 SLAB_ATTR_RO(red_zone); 6358 6359 static ssize_t poison_show(struct kmem_cache *s, char *buf) 6360 { 6361 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 6362 } 6363 6364 SLAB_ATTR_RO(poison); 6365 6366 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 6367 { 6368 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 6369 } 6370 6371 SLAB_ATTR_RO(store_user); 6372 6373 static ssize_t validate_show(struct kmem_cache *s, char *buf) 6374 { 6375 return 0; 6376 } 6377 6378 static ssize_t validate_store(struct kmem_cache *s, 6379 const char *buf, size_t length) 6380 { 6381 int ret = -EINVAL; 6382 6383 if (buf[0] == '1' && kmem_cache_debug(s)) { 6384 ret = validate_slab_cache(s); 6385 if (ret >= 0) 6386 ret = length; 6387 } 6388 return ret; 6389 } 6390 SLAB_ATTR(validate); 6391 6392 #endif /* CONFIG_SLUB_DEBUG */ 6393 6394 #ifdef CONFIG_FAILSLAB 6395 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 6396 { 6397 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 6398 } 6399 6400 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 6401 size_t length) 6402 { 6403 if (s->refcount > 1) 6404 return -EINVAL; 6405 6406 if (buf[0] == '1') 6407 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); 6408 else 6409 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); 6410 6411 return length; 6412 } 6413 SLAB_ATTR(failslab); 6414 #endif 6415 6416 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 6417 { 6418 return 0; 6419 } 6420 6421 static ssize_t shrink_store(struct kmem_cache *s, 6422 const char *buf, size_t length) 6423 { 6424 if (buf[0] == '1') 6425 kmem_cache_shrink(s); 6426 else 6427 return -EINVAL; 6428 return length; 6429 } 6430 SLAB_ATTR(shrink); 6431 6432 #ifdef CONFIG_NUMA 6433 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 6434 { 6435 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 6436 } 6437 6438 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 6439 const char *buf, size_t length) 6440 { 6441 unsigned int ratio; 6442 int err; 6443 6444 err = kstrtouint(buf, 10, &ratio); 6445 if (err) 6446 return err; 6447 if (ratio > 100) 6448 return -ERANGE; 6449 6450 s->remote_node_defrag_ratio = ratio * 10; 6451 6452 return length; 6453 } 6454 SLAB_ATTR(remote_node_defrag_ratio); 6455 #endif 6456 6457 #ifdef CONFIG_SLUB_STATS 6458 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 6459 { 6460 unsigned long sum = 0; 6461 int cpu; 6462 int len = 0; 6463 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 6464 6465 if (!data) 6466 return -ENOMEM; 6467 6468 for_each_online_cpu(cpu) { 6469 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 6470 6471 data[cpu] = x; 6472 sum += x; 6473 } 6474 6475 len += sysfs_emit_at(buf, len, "%lu", sum); 6476 6477 #ifdef CONFIG_SMP 6478 for_each_online_cpu(cpu) { 6479 if (data[cpu]) 6480 len += 
sysfs_emit_at(buf, len, " C%d=%u", 6481 cpu, data[cpu]); 6482 } 6483 #endif 6484 kfree(data); 6485 len += sysfs_emit_at(buf, len, "\n"); 6486 6487 return len; 6488 } 6489 6490 static void clear_stat(struct kmem_cache *s, enum stat_item si) 6491 { 6492 int cpu; 6493 6494 for_each_online_cpu(cpu) 6495 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 6496 } 6497 6498 #define STAT_ATTR(si, text) \ 6499 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 6500 { \ 6501 return show_stat(s, buf, si); \ 6502 } \ 6503 static ssize_t text##_store(struct kmem_cache *s, \ 6504 const char *buf, size_t length) \ 6505 { \ 6506 if (buf[0] != '0') \ 6507 return -EINVAL; \ 6508 clear_stat(s, si); \ 6509 return length; \ 6510 } \ 6511 SLAB_ATTR(text); \ 6512 6513 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 6514 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 6515 STAT_ATTR(FREE_FASTPATH, free_fastpath); 6516 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 6517 STAT_ATTR(FREE_FROZEN, free_frozen); 6518 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 6519 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 6520 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 6521 STAT_ATTR(ALLOC_SLAB, alloc_slab); 6522 STAT_ATTR(ALLOC_REFILL, alloc_refill); 6523 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 6524 STAT_ATTR(FREE_SLAB, free_slab); 6525 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 6526 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 6527 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 6528 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 6529 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 6530 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 6531 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 6532 STAT_ATTR(ORDER_FALLBACK, order_fallback); 6533 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 6534 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 6535 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 6536 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 6537 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 6538 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 6539 #endif /* CONFIG_SLUB_STATS */ 6540 6541 #ifdef CONFIG_KFENCE 6542 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) 6543 { 6544 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); 6545 } 6546 6547 static ssize_t skip_kfence_store(struct kmem_cache *s, 6548 const char *buf, size_t length) 6549 { 6550 int ret = length; 6551 6552 if (buf[0] == '0') 6553 s->flags &= ~SLAB_SKIP_KFENCE; 6554 else if (buf[0] == '1') 6555 s->flags |= SLAB_SKIP_KFENCE; 6556 else 6557 ret = -EINVAL; 6558 6559 return ret; 6560 } 6561 SLAB_ATTR(skip_kfence); 6562 #endif 6563 6564 static struct attribute *slab_attrs[] = { 6565 &slab_size_attr.attr, 6566 &object_size_attr.attr, 6567 &objs_per_slab_attr.attr, 6568 &order_attr.attr, 6569 &min_partial_attr.attr, 6570 &cpu_partial_attr.attr, 6571 &objects_partial_attr.attr, 6572 &partial_attr.attr, 6573 &cpu_slabs_attr.attr, 6574 &ctor_attr.attr, 6575 &aliases_attr.attr, 6576 &align_attr.attr, 6577 &hwcache_align_attr.attr, 6578 &reclaim_account_attr.attr, 6579 &destroy_by_rcu_attr.attr, 6580 &shrink_attr.attr, 6581 &slabs_cpu_partial_attr.attr, 6582 #ifdef CONFIG_SLUB_DEBUG 6583 &total_objects_attr.attr, 6584 &objects_attr.attr, 6585 &slabs_attr.attr, 6586 &sanity_checks_attr.attr, 6587 &trace_attr.attr, 6588 &red_zone_attr.attr, 6589 &poison_attr.attr, 6590 &store_user_attr.attr, 6591 &validate_attr.attr, 6592 #endif 6593 #ifdef CONFIG_ZONE_DMA 6594 &cache_dma_attr.attr, 6595 #endif 
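/* All remaining attributes are conditional on their config options */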
6596 #ifdef CONFIG_NUMA 6597 &remote_node_defrag_ratio_attr.attr, 6598 #endif 6599 #ifdef CONFIG_SLUB_STATS 6600 &alloc_fastpath_attr.attr, 6601 &alloc_slowpath_attr.attr, 6602 &free_fastpath_attr.attr, 6603 &free_slowpath_attr.attr, 6604 &free_frozen_attr.attr, 6605 &free_add_partial_attr.attr, 6606 &free_remove_partial_attr.attr, 6607 &alloc_from_partial_attr.attr, 6608 &alloc_slab_attr.attr, 6609 &alloc_refill_attr.attr, 6610 &alloc_node_mismatch_attr.attr, 6611 &free_slab_attr.attr, 6612 &cpuslab_flush_attr.attr, 6613 &deactivate_full_attr.attr, 6614 &deactivate_empty_attr.attr, 6615 &deactivate_to_head_attr.attr, 6616 &deactivate_to_tail_attr.attr, 6617 &deactivate_remote_frees_attr.attr, 6618 &deactivate_bypass_attr.attr, 6619 &order_fallback_attr.attr, 6620 &cmpxchg_double_fail_attr.attr, 6621 &cmpxchg_double_cpu_fail_attr.attr, 6622 &cpu_partial_alloc_attr.attr, 6623 &cpu_partial_free_attr.attr, 6624 &cpu_partial_node_attr.attr, 6625 &cpu_partial_drain_attr.attr, 6626 #endif 6627 #ifdef CONFIG_FAILSLAB 6628 &failslab_attr.attr, 6629 #endif 6630 #ifdef CONFIG_HARDENED_USERCOPY 6631 &usersize_attr.attr, 6632 #endif 6633 #ifdef CONFIG_KFENCE 6634 &skip_kfence_attr.attr, 6635 #endif 6636 6637 NULL 6638 }; 6639 6640 static const struct attribute_group slab_attr_group = { 6641 .attrs = slab_attrs, 6642 }; 6643 6644 static ssize_t slab_attr_show(struct kobject *kobj, 6645 struct attribute *attr, 6646 char *buf) 6647 { 6648 struct slab_attribute *attribute; 6649 struct kmem_cache *s; 6650 6651 attribute = to_slab_attr(attr); 6652 s = to_slab(kobj); 6653 6654 if (!attribute->show) 6655 return -EIO; 6656 6657 return attribute->show(s, buf); 6658 } 6659 6660 static ssize_t slab_attr_store(struct kobject *kobj, 6661 struct attribute *attr, 6662 const char *buf, size_t len) 6663 { 6664 struct slab_attribute *attribute; 6665 struct kmem_cache *s; 6666 6667 attribute = to_slab_attr(attr); 6668 s = to_slab(kobj); 6669 6670 if (!attribute->store) 6671 return -EIO; 6672 6673 return attribute->store(s, buf, len); 6674 } 6675 6676 static void kmem_cache_release(struct kobject *k) 6677 { 6678 slab_kmem_cache_release(to_slab(k)); 6679 } 6680 6681 static const struct sysfs_ops slab_sysfs_ops = { 6682 .show = slab_attr_show, 6683 .store = slab_attr_store, 6684 }; 6685 6686 static const struct kobj_type slab_ktype = { 6687 .sysfs_ops = &slab_sysfs_ops, 6688 .release = kmem_cache_release, 6689 }; 6690 6691 static struct kset *slab_kset; 6692 6693 static inline struct kset *cache_kset(struct kmem_cache *s) 6694 { 6695 return slab_kset; 6696 } 6697 6698 #define ID_STR_LENGTH 32 6699 6700 /* Create a unique string id for a slab cache: 6701 * 6702 * Format :[flags-]size 6703 */ 6704 static char *create_unique_id(struct kmem_cache *s) 6705 { 6706 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 6707 char *p = name; 6708 6709 if (!name) 6710 return ERR_PTR(-ENOMEM); 6711 6712 *p++ = ':'; 6713 /* 6714 * First flags affecting slabcache operations. We will only 6715 * get here for aliasable slabs so we do not need to support 6716 * too many flags. The flags here must cover all flags that 6717 * are matched during merging to guarantee that the id is 6718 * unique. 
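 *
 * For example, a cache created with SLAB_CACHE_DMA | SLAB_ACCOUNT and a
 * total per-object size of 192 bytes gets the id ":dA-0000192".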
6719 */ 6720 if (s->flags & SLAB_CACHE_DMA) 6721 *p++ = 'd'; 6722 if (s->flags & SLAB_CACHE_DMA32) 6723 *p++ = 'D'; 6724 if (s->flags & SLAB_RECLAIM_ACCOUNT) 6725 *p++ = 'a'; 6726 if (s->flags & SLAB_CONSISTENCY_CHECKS) 6727 *p++ = 'F'; 6728 if (s->flags & SLAB_ACCOUNT) 6729 *p++ = 'A'; 6730 if (p != name + 1) 6731 *p++ = '-'; 6732 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); 6733 6734 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { 6735 kfree(name); 6736 return ERR_PTR(-EINVAL); 6737 } 6738 kmsan_unpoison_memory(name, p - name); 6739 return name; 6740 } 6741 6742 static int sysfs_slab_add(struct kmem_cache *s) 6743 { 6744 int err; 6745 const char *name; 6746 struct kset *kset = cache_kset(s); 6747 int unmergeable = slab_unmergeable(s); 6748 6749 if (!unmergeable && disable_higher_order_debug && 6750 (slub_debug & DEBUG_METADATA_FLAGS)) 6751 unmergeable = 1; 6752 6753 if (unmergeable) { 6754 /* 6755 * Slabcache can never be merged so we can use the name proper. 6756 * This is typically the case for debug situations. In that 6757 * case we can catch duplicate names easily. 6758 */ 6759 sysfs_remove_link(&slab_kset->kobj, s->name); 6760 name = s->name; 6761 } else { 6762 /* 6763 * Create a unique name for the slab as a target 6764 * for the symlinks. 6765 */ 6766 name = create_unique_id(s); 6767 if (IS_ERR(name)) 6768 return PTR_ERR(name); 6769 } 6770 6771 s->kobj.kset = kset; 6772 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 6773 if (err) 6774 goto out; 6775 6776 err = sysfs_create_group(&s->kobj, &slab_attr_group); 6777 if (err) 6778 goto out_del_kobj; 6779 6780 if (!unmergeable) { 6781 /* Setup first alias */ 6782 sysfs_slab_alias(s, s->name); 6783 } 6784 out: 6785 if (!unmergeable) 6786 kfree(name); 6787 return err; 6788 out_del_kobj: 6789 kobject_del(&s->kobj); 6790 goto out; 6791 } 6792 6793 void sysfs_slab_unlink(struct kmem_cache *s) 6794 { 6795 if (slab_state >= FULL) 6796 kobject_del(&s->kobj); 6797 } 6798 6799 void sysfs_slab_release(struct kmem_cache *s) 6800 { 6801 if (slab_state >= FULL) 6802 kobject_put(&s->kobj); 6803 } 6804 6805 /* 6806 * Need to buffer aliases during bootup until sysfs becomes 6807 * available lest we lose that information. 6808 */ 6809 struct saved_alias { 6810 struct kmem_cache *s; 6811 const char *name; 6812 struct saved_alias *next; 6813 }; 6814 6815 static struct saved_alias *alias_list; 6816 6817 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 6818 { 6819 struct saved_alias *al; 6820 6821 if (slab_state == FULL) { 6822 /* 6823 * If we have a leftover link then remove it. 
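 * Such a link can be left behind by an earlier cache that was merged
 * under the same name and has since been destroyed.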
6824 */ 6825 sysfs_remove_link(&slab_kset->kobj, name); 6826 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 6827 } 6828 6829 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 6830 if (!al) 6831 return -ENOMEM; 6832 6833 al->s = s; 6834 al->name = name; 6835 al->next = alias_list; 6836 alias_list = al; 6837 kmsan_unpoison_memory(al, sizeof(*al)); 6838 return 0; 6839 } 6840 6841 static int __init slab_sysfs_init(void) 6842 { 6843 struct kmem_cache *s; 6844 int err; 6845 6846 mutex_lock(&slab_mutex); 6847 6848 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 6849 if (!slab_kset) { 6850 mutex_unlock(&slab_mutex); 6851 pr_err("Cannot register slab subsystem.\n"); 6852 return -ENOMEM; 6853 } 6854 6855 slab_state = FULL; 6856 6857 list_for_each_entry(s, &slab_caches, list) { 6858 err = sysfs_slab_add(s); 6859 if (err) 6860 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 6861 s->name); 6862 } 6863 6864 while (alias_list) { 6865 struct saved_alias *al = alias_list; 6866 6867 alias_list = alias_list->next; 6868 err = sysfs_slab_alias(al->s, al->name); 6869 if (err) 6870 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 6871 al->name); 6872 kfree(al); 6873 } 6874 6875 mutex_unlock(&slab_mutex); 6876 return 0; 6877 } 6878 late_initcall(slab_sysfs_init); 6879 #endif /* SLAB_SUPPORTS_SYSFS */ 6880 6881 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 6882 static int slab_debugfs_show(struct seq_file *seq, void *v) 6883 { 6884 struct loc_track *t = seq->private; 6885 struct location *l; 6886 unsigned long idx; 6887 6888 idx = (unsigned long) t->idx; 6889 if (idx < t->count) { 6890 l = &t->loc[idx]; 6891 6892 seq_printf(seq, "%7ld ", l->count); 6893 6894 if (l->addr) 6895 seq_printf(seq, "%pS", (void *)l->addr); 6896 else 6897 seq_puts(seq, "<not-available>"); 6898 6899 if (l->waste) 6900 seq_printf(seq, " waste=%lu/%lu", 6901 l->count * l->waste, l->waste); 6902 6903 if (l->sum_time != l->min_time) { 6904 seq_printf(seq, " age=%ld/%llu/%ld", 6905 l->min_time, div_u64(l->sum_time, l->count), 6906 l->max_time); 6907 } else 6908 seq_printf(seq, " age=%ld", l->min_time); 6909 6910 if (l->min_pid != l->max_pid) 6911 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 6912 else 6913 seq_printf(seq, " pid=%ld", 6914 l->min_pid); 6915 6916 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 6917 seq_printf(seq, " cpus=%*pbl", 6918 cpumask_pr_args(to_cpumask(l->cpus))); 6919 6920 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 6921 seq_printf(seq, " nodes=%*pbl", 6922 nodemask_pr_args(&l->nodes)); 6923 6924 #ifdef CONFIG_STACKDEPOT 6925 { 6926 depot_stack_handle_t handle; 6927 unsigned long *entries; 6928 unsigned int nr_entries, j; 6929 6930 handle = READ_ONCE(l->handle); 6931 if (handle) { 6932 nr_entries = stack_depot_fetch(handle, &entries); 6933 seq_puts(seq, "\n"); 6934 for (j = 0; j < nr_entries; j++) 6935 seq_printf(seq, " %pS\n", (void *)entries[j]); 6936 } 6937 } 6938 #endif 6939 seq_puts(seq, "\n"); 6940 } 6941 6942 if (!idx && !t->count) 6943 seq_puts(seq, "No data\n"); 6944 6945 return 0; 6946 } 6947 6948 static void slab_debugfs_stop(struct seq_file *seq, void *v) 6949 { 6950 } 6951 6952 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 6953 { 6954 struct loc_track *t = seq->private; 6955 6956 t->idx = ++(*ppos); 6957 if (*ppos <= t->count) 6958 return ppos; 6959 6960 return NULL; 6961 } 6962 6963 static int cmp_loc_by_count(const void *a, const void *b, const void *data) 6964 { 6965 struct location 
*loc1 = (struct location *)a; 6966 struct location *loc2 = (struct location *)b; 6967 6968 if (loc1->count > loc2->count) 6969 return -1; 6970 else 6971 return 1; 6972 } 6973 6974 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 6975 { 6976 struct loc_track *t = seq->private; 6977 6978 t->idx = *ppos; 6979 return ppos; 6980 } 6981 6982 static const struct seq_operations slab_debugfs_sops = { 6983 .start = slab_debugfs_start, 6984 .next = slab_debugfs_next, 6985 .stop = slab_debugfs_stop, 6986 .show = slab_debugfs_show, 6987 }; 6988 6989 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 6990 { 6991 6992 struct kmem_cache_node *n; 6993 enum track_item alloc; 6994 int node; 6995 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 6996 sizeof(struct loc_track)); 6997 struct kmem_cache *s = file_inode(filep)->i_private; 6998 unsigned long *obj_map; 6999 7000 if (!t) 7001 return -ENOMEM; 7002 7003 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 7004 if (!obj_map) { 7005 seq_release_private(inode, filep); 7006 return -ENOMEM; 7007 } 7008 7009 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 7010 alloc = TRACK_ALLOC; 7011 else 7012 alloc = TRACK_FREE; 7013 7014 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 7015 bitmap_free(obj_map); 7016 seq_release_private(inode, filep); 7017 return -ENOMEM; 7018 } 7019 7020 for_each_kmem_cache_node(s, node, n) { 7021 unsigned long flags; 7022 struct slab *slab; 7023 7024 if (!node_nr_slabs(n)) 7025 continue; 7026 7027 spin_lock_irqsave(&n->list_lock, flags); 7028 list_for_each_entry(slab, &n->partial, slab_list) 7029 process_slab(t, s, slab, alloc, obj_map); 7030 list_for_each_entry(slab, &n->full, slab_list) 7031 process_slab(t, s, slab, alloc, obj_map); 7032 spin_unlock_irqrestore(&n->list_lock, flags); 7033 } 7034 7035 /* Sort locations by count */ 7036 sort_r(t->loc, t->count, sizeof(struct location), 7037 cmp_loc_by_count, NULL, NULL); 7038 7039 bitmap_free(obj_map); 7040 return 0; 7041 } 7042 7043 static int slab_debug_trace_release(struct inode *inode, struct file *file) 7044 { 7045 struct seq_file *seq = file->private_data; 7046 struct loc_track *t = seq->private; 7047 7048 free_loc_track(t); 7049 return seq_release_private(inode, file); 7050 } 7051 7052 static const struct file_operations slab_debugfs_fops = { 7053 .open = slab_debug_trace_open, 7054 .read = seq_read, 7055 .llseek = seq_lseek, 7056 .release = slab_debug_trace_release, 7057 }; 7058 7059 static void debugfs_slab_add(struct kmem_cache *s) 7060 { 7061 struct dentry *slab_cache_dir; 7062 7063 if (unlikely(!slab_debugfs_root)) 7064 return; 7065 7066 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 7067 7068 debugfs_create_file("alloc_traces", 0400, 7069 slab_cache_dir, s, &slab_debugfs_fops); 7070 7071 debugfs_create_file("free_traces", 0400, 7072 slab_cache_dir, s, &slab_debugfs_fops); 7073 } 7074 7075 void debugfs_slab_release(struct kmem_cache *s) 7076 { 7077 debugfs_lookup_and_remove(s->name, slab_debugfs_root); 7078 } 7079 7080 static int __init slab_debugfs_init(void) 7081 { 7082 struct kmem_cache *s; 7083 7084 slab_debugfs_root = debugfs_create_dir("slab", NULL); 7085 7086 list_for_each_entry(s, &slab_caches, list) 7087 if (s->flags & SLAB_STORE_USER) 7088 debugfs_slab_add(s); 7089 7090 return 0; 7091 7092 } 7093 __initcall(slab_debugfs_init); 7094 #endif 7095 /* 7096 * The /proc/slabinfo ABI 7097 */ 7098 #ifdef CONFIG_SLUB_DEBUG 7099 void 
get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 7100 { 7101 unsigned long nr_slabs = 0; 7102 unsigned long nr_objs = 0; 7103 unsigned long nr_free = 0; 7104 int node; 7105 struct kmem_cache_node *n; 7106 7107 for_each_kmem_cache_node(s, node, n) { 7108 nr_slabs += node_nr_slabs(n); 7109 nr_objs += node_nr_objs(n); 7110 nr_free += count_partial(n, count_free); 7111 } 7112 7113 sinfo->active_objs = nr_objs - nr_free; 7114 sinfo->num_objs = nr_objs; 7115 sinfo->active_slabs = nr_slabs; 7116 sinfo->num_slabs = nr_slabs; 7117 sinfo->objects_per_slab = oo_objects(s->oo); 7118 sinfo->cache_order = oo_order(s->oo); 7119 } 7120 7121 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) 7122 { 7123 } 7124 7125 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 7126 size_t count, loff_t *ppos) 7127 { 7128 return -EIO; 7129 } 7130 #endif /* CONFIG_SLUB_DEBUG */ 7131
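/*
 * The counts filled in by get_slabinfo() above are printed by the common
 * slabinfo code in mm/slab_common.c, one line per cache in the form:
 *
 *   name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> ...
 */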