// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* mm_account_reclaimed_pages() */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/node.h>
#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/kmemleak.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/sort.h>
#include <linux/irq_work.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
 *   4. slab_lock(slab) (Only on some arches)
 *   5. object_map_lock (Only for debugging)
 *
 * slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *   Also synchronizes memory hotplug callbacks.
 *
 * slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
 *
 *   The slab_lock is only used on arches that do not have the ability
 *   to do a cmpxchg_double. It only protects:
 *
 *   A. slab->freelist -> List of free objects in a slab
 *   B. slab->inuse    -> Number of objects in use
 *   C. slab->objects  -> Number of objects in slab
 *   D. slab->frozen   -> frozen state
 *
 * Frozen slabs
 *
 *   If a slab is frozen then it is exempt from list management. It is
 *   the cpu slab which is actively allocated from by the processor that
 *   froze it and it is not on any list. The processor that froze the
 *   slab is the one who can perform list operations on the slab. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   slab's freelist.
 *
 * CPU partial slabs
 *
 *   Partially empty slabs are cached on the CPU partial list for
 *   performance reasons; this speeds up the allocation process.
 *   These slabs are not frozen, but are also exempt from list management,
 *   by clearing the SL_partial flag when moving out of the node
 *   partial list. Please see __slab_free() for more details.
 *
 * To sum up, the current scheme is:
 * - node partial slab: SL_partial && !frozen
 * - cpu partial slab: !SL_partial && !frozen
 * - cpu slab: !SL_partial && frozen
 * - full slab: !SL_partial && !frozen
 *
 * list_lock
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   For debug caches, all allocations are forced to go through a list_lock
 *   protected region to serialize against concurrent validation.
 *
 * cpu_slab->lock local lock
 *
 *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters. This is a percpu structure manipulated only by
 *   the local cpu, so the lock protects against being preempted or interrupted
 *   by an irq. Fast path operations rely on lockless operations instead.
 *
 *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption,
 *   which means the lockless fastpath cannot be used as it might interfere with
 *   an in-progress slow path operation. In this case the local lock is always
 *   taken but it still utilizes the freelist for the common operations.
 *
 * lockless fastpaths
 *
 *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 *   are fully lockless when satisfied from the percpu slab (and when
 *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
 *   They also don't disable preemption or migration or irqs. They rely on
 *   the transaction id (tid) field to detect being preempted or moved to
 *   another cpu.
 *
 * irq, preemption, migration considerations
 *
 *   Interrupts are disabled as part of list_lock or local_lock operations, or
 *   around the slab_lock operation, in order to make the slab allocator safe
 *   to use in the context of an irq.
 *
 *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
 *   doesn't have to be revalidated in each section protected by the local lock.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs, called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * slab->frozen		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

/**
 * enum slab_flags - How the slab flags bits are used.
 * @SL_locked: Is locked with slab_lock()
 * @SL_partial: On the per-node partial list
 * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
 *
 * The slab flags share space with the page flags but some bits have
 * different interpretations. The high bits are used for information
 * like zone/node/section.
 */
enum slab_flags {
	SL_locked = PG_locked,
	SL_partial = PG_workingset,	/* Historical reasons for this bit */
	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
};

/*
 * We could simply use migrate_disable()/enable() but as long as it's a
 * function call even on !PREEMPT_RT, use inline preempt_disable() there.
 */
#ifndef CONFIG_PREEMPT_RT
#define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
#define USE_LOCKLESS_FAST_PATH()	(true)
#else
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
#define USE_LOCKLESS_FAST_PATH()	(false)
#endif

#ifndef CONFIG_SLUB_TINY
#define __fastpath_inline __always_inline
#else
#define __fastpath_inline
#endif

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif	/* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_NUMA
static DEFINE_STATIC_KEY_FALSE(strict_numa);
#endif

/* Structure holding parameters for get_partial() call chain */
struct partial_context {
	gfp_t flags;
	unsigned int orig_size;
	void *object;
};

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

#ifndef CONFIG_SLUB_TINY
/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10
#else
#define MIN_PARTIAL 0
#define MAX_PARTIAL 0
#endif

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slab_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
/* Use cmpxchg_double */

#ifdef system_has_freelist_aba
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
#else
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef SLAB_SUPPORTS_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

enum stat_item {
	ALLOC_PCS,		/* Allocation from percpu sheaf */
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_PCS,		/* Free to percpu sheaf */
	FREE_RCU_SHEAF,		/* Free to rcu_free sheaf */
	FREE_RCU_SHEAF_FAIL,	/* Failed to free to a rcu_free sheaf */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	SHEAF_FLUSH,		/* Objects flushed from a sheaf */
	SHEAF_REFILL,		/* Objects refilled to a sheaf */
	SHEAF_ALLOC,		/* Allocation of an empty sheaf */
	SHEAF_FREE,		/* Freeing of an empty sheaf */
	BARN_GET,		/* Got full sheaf from barn */
	BARN_GET_FAIL,		/* Failed to get full sheaf from barn */
	BARN_PUT,		/* Put full sheaf to barn */
	BARN_PUT_FAIL,		/* Failed to put full sheaf to barn */
	SHEAF_PREFILL_FAST,	/* Sheaf prefill grabbed the spare sheaf */
	SHEAF_PREFILL_SLOW,	/* Sheaf prefill found no spare sheaf */
	SHEAF_PREFILL_OVERSIZE,	/* Allocation of oversize sheaf for prefill */
	SHEAF_RETURN_FAST,	/* Sheaf return reattached spare sheaf */
	SHEAF_RETURN_SLOW,	/* Sheaf return could not reattach spare */
	NR_SLUB_STAT_ITEMS
};

#ifndef CONFIG_SLUB_TINY
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	union {
		struct {
			void **freelist;	/* Pointer to next available object */
			unsigned long tid;	/* Globally unique transaction id */
		};
		freelist_aba_t freelist_tid;
	};
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated slabs */
#endif
	local_trylock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
};
#endif /* CONFIG_SLUB_TINY */

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

static inline
void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_add(s->cpu_slab->stat[si], v);
#endif
}

#define MAX_FULL_SHEAVES	10
#define MAX_EMPTY_SHEAVES	10

struct node_barn {
	spinlock_t lock;
	struct list_head sheaves_full;
	struct list_head sheaves_empty;
	unsigned int nr_full;
	unsigned int nr_empty;
};

struct slab_sheaf {
	union {
		struct rcu_head rcu_head;
		struct list_head barn_list;
		/* only used for prefilled sheaves */
		unsigned int capacity;
	};
	struct kmem_cache *cache;
	unsigned int size;
	int node;	/* only used for rcu_sheaf */
	void *objects[];
};

struct slub_percpu_sheaves {
	local_trylock_t lock;
	struct slab_sheaf *main;	/* never NULL when unlocked */
	struct slab_sheaf *spare;	/* empty or full, may be NULL */
	struct slab_sheaf *rcu_free;	/* for batching kfree_rcu() */
};

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
	struct node_barn *barn;
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/* Get the barn of the current cpu's memory node */
static inline struct node_barn *get_barn(struct kmem_cache *s)
{
	return get_node(s, numa_mem_id())->barn;
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

/*
 * Workqueue used for flush_cpu_slab().
 */
static struct workqueue_struct *flushwq;

struct slub_flush_work {
	struct work_struct work;
	struct kmem_cache *s;
	bool skip;
};

static DEFINE_MUTEX(flush_lock);
static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
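 *
 * Illustrative example (not part of the original comment): with
 * CONFIG_SLAB_FREELIST_HARDENED, a free pointer P stored at address A is
 * kept as P ^ s->random ^ swab(A). Decoding applies the same XORs, so
 * freelist_ptr_decode(s, freelist_ptr_encode(s, P, A), A) == P, while a
 * leaked stored value does not reveal P without knowing s->random.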
 */
static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
					    void *ptr, unsigned long ptr_addr)
{
	unsigned long encoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
#else
	encoded = (unsigned long)ptr;
#endif
	return (freeptr_t){.v = encoded};
}

static inline void *freelist_ptr_decode(const struct kmem_cache *s,
					freeptr_t ptr, unsigned long ptr_addr)
{
	void *decoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
#else
	decoded = (void *)ptr.v;
#endif
	return decoded;
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	unsigned long ptr_addr;
	freeptr_t p;

	object = kasan_reset_tag(object);
	ptr_addr = (unsigned long)object + s->offset;
	p = *(freeptr_t *)(ptr_addr);
	return freelist_ptr_decode(s, p, ptr_addr);
}

#ifndef CONFIG_SLUB_TINY
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetchw(object + s->offset);
}
#endif

/*
 * When running under KMSAN, get_freepointer_safe() may return an uninitialized
 * pointer value in the case the current thread loses the race for the next
 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
 * slab_alloc_node() will fail, so the uninitialized value won't be used, but
 * KMSAN will still check all arguments of cmpxchg because of imperfect
 * handling of inline assembly.
 * To work around this problem, we apply __no_kmsan_checks to ensure that
 * get_freepointer_safe() returns initialized memory.
 */
__no_kmsan_checks
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	freeptr_t p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
	return freelist_ptr_decode(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
}

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
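 *
 * (A sketch of the layout this implies, assuming SLAB_STORE_USER: the two
 * struct track records and, for kmalloc caches, the orig_size field are
 * laid out starting at this offset; see get_track() and set_orig_size().)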
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						      unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

#ifdef CONFIG_SLUB_CPU_PARTIAL
static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
	unsigned int nr_slabs;

	s->cpu_partial = nr_objects;

	/*
	 * We take the number of objects but actually limit the number of
	 * slabs on the per cpu partial list, in order to limit excessive
	 * growth of the list. For simplicity we assume that the slabs will
	 * be half-full.
	 */
	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
	s->cpu_partial_slabs = nr_slabs;
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return s->cpu_partial_slabs;
}
#else
static inline void
slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * If network-based swap is enabled, slub must keep track of whether memory
 * was allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return test_bit(SL_pfmemalloc, &slab->flags.f);
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	set_bit(SL_pfmemalloc, &slab->flags.f);
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__clear_bit(SL_pfmemalloc, &slab->flags.f);
}

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct slab *slab)
{
	bit_spin_lock(SL_locked, &slab->flags.f);
}

static __always_inline void slab_unlock(struct slab *slab)
{
	bit_spin_unlock(SL_locked, &slab->flags.f);
}

static inline bool
__update_freelist_fast(struct slab *slab,
		       void *freelist_old, unsigned long counters_old,
		       void *freelist_new, unsigned long counters_new)
{
#ifdef system_has_freelist_aba
	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };

	return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
#else
	return false;
#endif
}

static inline bool
__update_freelist_slow(struct slab *slab,
		       void *freelist_old, unsigned long counters_old,
		       void *freelist_new, unsigned long counters_new)
{
	bool ret = false;

	slab_lock(slab);
	if (slab->freelist == freelist_old &&
	    slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		ret = true;
	}
	slab_unlock(slab);

	return ret;
}

/*
 * Interrupts must be disabled (for the fallback code to work right), typically
 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
 * part of bit_spin_lock(), is sufficient because the policy is not to allow any
 * allocation/free operation in hardirq context. Therefore nothing can
 * interrupt the operation.
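 *
 * (slab_update_freelist() below is the variant for callers that may run with
 * interrupts enabled; it wraps the slow path in local_irq_save()/restore()
 * itself instead of relying on the caller.)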
 */
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	bool ret;

	if (USE_LOCKLESS_FAST_PATH())
		lockdep_assert_irqs_disabled();

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	} else {
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	bool ret;

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
		local_irq_restore(flags);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

/*
 * kmalloc caches have fixed sizes (mostly power of 2), and the kmalloc() API
 * family will round up the real request size to these fixed ones, so
 * there could be more allocated space than what was requested. Save the
 * original request size in the metadata area, for better debug and sanity
 * checks.
 */
static inline void set_orig_size(struct kmem_cache *s,
				 void *object, unsigned int orig_size)
{
	void *p = kasan_reset_tag(object);

	if (!slub_debug_orig_size(s))
		return;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	*(unsigned int *)p = orig_size;
}

static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
	void *p = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return kfence_ksize(object);

	if (!slub_debug_orig_size(s))
		return s->object_size;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	return *(unsigned int *)p;
}

#ifdef CONFIG_SLUB_DEBUG

/*
 * For debugging context when we want to check if the struct slab pointer
 * appears to be valid.
 */
static inline bool validate_slab_ptr(struct slab *slab)
{
	return PageSlab(slab_page(slab));
}

static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
		       struct slab *slab)
{
	void *addr = slab_address(slab);
	void *p;

	bitmap_zero(obj_map, slab->objects);

	for (p = slab->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), obj_map);
}

#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}

bool slab_in_kunit_test(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
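 *
 * Typical usage in this file (e.g. print_section() and
 * check_bytes_and_report() below):
 *
 *	metadata_access_enable();
 *	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
 *	metadata_access_disable();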
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
	kmsan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kmsan_enable_current();
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct slab *slab, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = slab_address(slab);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + slab->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
			16, 1, kasan_reset_tag((void *)addr), length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

#ifdef CONFIG_STACKDEPOT
static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
{
	depot_stack_handle_t handle;
	unsigned long entries[TRACK_ADDRS_COUNT];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	handle = stack_depot_save(entries, nr_entries, gfp_flags);

	return handle;
}
#else
static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
{
	return 0;
}
#endif

static void set_track_update(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr,
			     depot_stack_handle_t handle)
{
	struct track *p = get_track(s, object, alloc);

#ifdef CONFIG_STACKDEPOT
	p->handle = handle;
#endif
	p->addr = addr;
	p->cpu = smp_processor_id();
	p->pid = current->pid;
	p->when = jiffies;
}

static __always_inline void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
{
	depot_stack_handle_t handle = set_track_prepare(gfp_flags);

	set_track_update(s, object, alloc, addr, handle);
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	struct track *p;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	p = get_track(s, object, TRACK_ALLOC);
	memset(p, 0, 2*sizeof(struct track));
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	depot_stack_handle_t handle __maybe_unused;

	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKDEPOT
	handle = READ_ONCE(t->handle);
	if (handle)
		stack_depot_print(handle);
	else
		pr_err("object allocation/free stack trace missing\n");
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_slab_info(const struct slab *slab)
{
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
	       slab, slab->objects, slab->inuse, slab->freelist,
	       &slab->flags.f);
}

void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
	set_orig_size(s, (void *)object, s->object_size);
}

static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
{
	struct va_format vaf;
	va_list args;

	va_copy(args, argsp);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");
	va_end(args);
}

static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__slab_bug(s, fmt, args);
	va_end(args);
}

__printf(2, 3)
static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = slab_address(slab);

	print_tracking(s, p);

	print_slab_info(slab);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			      s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (slub_debug_orig_size(s))
		off += sizeof(unsigned int);

	off += kasan_metadata_size(s, false);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);
}

static void object_err(struct kmem_cache *s, struct slab *slab,
			u8 *object, const char *reason)
{
	if (slab_add_kunit_errors())
		return;

	slab_bug(s, reason);
	if (!object || !check_valid_pointer(s, slab, object)) {
		print_slab_info(slab);
		pr_err("Invalid pointer 0x%p\n", object);
	} else {
		print_trailer(s, slab, object);
	}
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

	WARN_ON(1);
}

static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, slab, nextfree) && freelist) {
		object_err(s, slab, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static void __slab_err(struct slab *slab)
{
	if (slab_in_kunit_test())
		return;

	print_slab_info(slab);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

	WARN_ON(1);
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
			const char *fmt, ...)
{
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	__slab_bug(s, fmt, args);
	va_end(args);

	__slab_err(slab);
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);
	unsigned int poison_size = s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		/*
		 * Here and below, avoid overwriting the KMSAN shadow. Keeping
		 * the shadow makes it possible to distinguish uninit-value
		 * from use-after-free.
		 */
		memset_no_sanitize_memory(p - s->red_left_pad, val,
					  s->red_left_pad);

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			/*
			 * Redzone the extra space that kmalloc allocated
			 * beyond the requested size; the poison size is then
			 * limited to the original request size accordingly.
			 */
			poison_size = get_orig_size(s, object);
		}
	}

	if (s->flags & __OBJECT_POISON) {
		memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
		memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
	}

	if (s->flags & SLAB_RED_ZONE)
		memset_no_sanitize_memory(p + poison_size, val,
					  s->inuse - poison_size);
}

static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
	memset(from, data, to - from);
}

#ifdef CONFIG_KMSAN
#define pad_check_attributes noinline __no_kmsan_checks
#else
#define pad_check_attributes
#endif

static pad_check_attributes int
check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
		       u8 *object, const char *what, u8 *start, unsigned int value,
		       unsigned int bytes, bool slab_obj_print)
{
	u8 *fault;
	u8 *end;
	u8 *addr = slab_address(slab);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	if (slab_add_kunit_errors())
		goto skip_bug_print;

	pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
	       what, fault, end - 1, fault - addr, fault[0], value);

	if (slab_obj_print)
		object_err(s, slab, object, "Object corrupt");

skip_bug_print:
	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 *	Bytes of the object to be managed.
 *	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 *	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *	0xa5 (POISON_END)
 *
 * object + s->object_size
 *	Padding to reach word boundary. This is also used for Redzoning.
 *	Padding is extended by another word if Redzoning is enabled and
 *	object_size == inuse.
 *
 *	We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
 *	0xcc (SLUB_RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *	Meta data starts here.
 *
 *	A. Free pointer (if we cannot overwrite object on free)
 *	B. Tracking data for SLAB_STORE_USER
 *	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
 *	D. Padding to reach required alignment boundary or at minimum
 *		one word if debugging is on to be able to detect writes
 *		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER) {
		/* We also have user information there */
		off += 2 * sizeof(struct track);

		if (s->flags & SLAB_KMALLOC)
			off += sizeof(unsigned int);
	}

	off += kasan_metadata_size(s, false);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, slab, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off, true);
}

/* Check the pad bytes at the end of a slab page */
static pad_check_attributes void
slab_pad_check(struct kmem_cache *s, struct slab *slab)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return;

	start = slab_address(slab);
	length = slab_size(slab);
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
		 fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);
	__slab_err(slab);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
}

static int check_object(struct kmem_cache *s, struct slab *slab,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;
	unsigned int orig_size, kasan_meta_size;
	int ret = 1;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad, ret))
			ret = 0;

		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size, ret))
			ret = 0;

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			orig_size = get_orig_size(s, object);

			if (s->object_size > orig_size &&
				!check_bytes_and_report(s, slab, object,
					"kmalloc Redzone", p + orig_size,
					val, s->object_size - orig_size, ret)) {
				ret = 0;
			}
		}
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size, ret))
				ret = 0;
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
			/*
			 * KASAN can save its free meta data inside of the
			 * object at offset 0. Thus, skip checking the part of
			 * the redzone that overlaps with the meta data.
			 */
			kasan_meta_size = kasan_metadata_size(s, true);
			if (kasan_meta_size < s->object_size - 1 &&
			    !check_bytes_and_report(s, slab, p, "Poison",
					p + kasan_meta_size, POISON_FREE,
					s->object_size - kasan_meta_size - 1, ret))
				ret = 0;
			if (kasan_meta_size < s->object_size &&
			    !check_bytes_and_report(s, slab, p, "End Poison",
					p + s->object_size - 1, POISON_END, 1, ret))
				ret = 0;
		}
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		if (!check_pad_bytes(s, slab, p))
			ret = 0;
	}

	/*
	 * Cannot check freepointer while object is allocated if
	 * object and freepointer overlap.
	 */
	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
		object_err(s, slab, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		ret = 0;
	}

	return ret;
}

/*
 * Checks if the slab state looks sane. Assumes the struct slab pointer
Assumes the struct slab pointer 1527 * was either obtained in a way that ensures it's valid, or validated 1528 * by validate_slab_ptr() 1529 */ 1530 static int check_slab(struct kmem_cache *s, struct slab *slab) 1531 { 1532 int maxobj; 1533 1534 maxobj = order_objects(slab_order(slab), s->size); 1535 if (slab->objects > maxobj) { 1536 slab_err(s, slab, "objects %u > max %u", 1537 slab->objects, maxobj); 1538 return 0; 1539 } 1540 if (slab->inuse > slab->objects) { 1541 slab_err(s, slab, "inuse %u > max %u", 1542 slab->inuse, slab->objects); 1543 return 0; 1544 } 1545 if (slab->frozen) { 1546 slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed"); 1547 return 0; 1548 } 1549 1550 /* Slab_pad_check fixes things up after itself */ 1551 slab_pad_check(s, slab); 1552 return 1; 1553 } 1554 1555 /* 1556 * Determine if a certain object in a slab is on the freelist. Must hold the 1557 * slab lock to guarantee that the chains are in a consistent state. 1558 */ 1559 static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search) 1560 { 1561 int nr = 0; 1562 void *fp; 1563 void *object = NULL; 1564 int max_objects; 1565 1566 fp = slab->freelist; 1567 while (fp && nr <= slab->objects) { 1568 if (fp == search) 1569 return true; 1570 if (!check_valid_pointer(s, slab, fp)) { 1571 if (object) { 1572 object_err(s, slab, object, 1573 "Freechain corrupt"); 1574 set_freepointer(s, object, NULL); 1575 break; 1576 } else { 1577 slab_err(s, slab, "Freepointer corrupt"); 1578 slab->freelist = NULL; 1579 slab->inuse = slab->objects; 1580 slab_fix(s, "Freelist cleared"); 1581 return false; 1582 } 1583 } 1584 object = fp; 1585 fp = get_freepointer(s, object); 1586 nr++; 1587 } 1588 1589 if (nr > slab->objects) { 1590 slab_err(s, slab, "Freelist cycle detected"); 1591 slab->freelist = NULL; 1592 slab->inuse = slab->objects; 1593 slab_fix(s, "Freelist cleared"); 1594 return false; 1595 } 1596 1597 max_objects = order_objects(slab_order(slab), s->size); 1598 if (max_objects > MAX_OBJS_PER_PAGE) 1599 max_objects = MAX_OBJS_PER_PAGE; 1600 1601 if (slab->objects != max_objects) { 1602 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", 1603 slab->objects, max_objects); 1604 slab->objects = max_objects; 1605 slab_fix(s, "Number of objects adjusted"); 1606 } 1607 if (slab->inuse != slab->objects - nr) { 1608 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", 1609 slab->inuse, slab->objects - nr); 1610 slab->inuse = slab->objects - nr; 1611 slab_fix(s, "Object count adjusted"); 1612 } 1613 return search == NULL; 1614 } 1615 1616 static void trace(struct kmem_cache *s, struct slab *slab, void *object, 1617 int alloc) 1618 { 1619 if (s->flags & SLAB_TRACE) { 1620 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 1621 s->name, 1622 alloc ? "alloc" : "free", 1623 object, slab->inuse, 1624 slab->freelist); 1625 1626 if (!alloc) 1627 print_section(KERN_INFO, "Object ", (void *)object, 1628 s->object_size); 1629 1630 dump_stack(); 1631 } 1632 } 1633 1634 /* 1635 * Tracking of fully allocated slabs for debugging purposes. 
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&slab->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&slab->slab_list);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_inc(&n->nr_slabs);
	atomic_long_add(objects, &n->total_objects);
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
	metadata_access_disable();
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct slab *slab, void *object)
{
	if (!check_slab(s, slab))
		return 0;

	if (!check_valid_pointer(s, slab, object)) {
		object_err(s, slab, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline bool alloc_debug_processing(struct kmem_cache *s,
			struct slab *slab, void *object, int orig_size)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, slab, object))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	trace(s, slab, object, 1);
	set_orig_size(s, object, orig_size);
	init_object(s, object, SLUB_RED_ACTIVE);
	return true;

bad:
	/*
	 * Let's do the best we can to avoid issues in the future. Marking all
	 * objects as used avoids touching the remaining objects.
	 */
	slab_fix(s, "Marking all objects used");
	slab->inuse = slab->objects;
	slab->freelist = NULL;
	slab->frozen = 1; /* mark consistency-failed slab as frozen */

	return false;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct slab *slab, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, slab, object)) {
		slab_err(s, slab, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, slab, object)) {
		object_err(s, slab, object, "Object already free");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != slab->slab_cache)) {
		if (!slab->slab_cache) {
			slab_err(NULL, slab, "No slab cache for object 0x%p",
				 object);
		} else {
			object_err(s, slab, object,
					"page slab pointer corrupt.");
		}
		return 0;
	}
	return 1;
}

/*
 * Parse a block of slab_debug options. Blocks are delimited by ';'
 *
 * @str: start of block
 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs: return start of list of slabs, or NULL when there's no list
 * @init: assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if their minimum
			 * order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			if (init)
				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}

static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	slab_flags_t global_flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	global_flags = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			global_flags = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
			if (flags & SLAB_STORE_USER)
				stack_depot_request_early_init();
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with a list of
	 * slabs means debugging is only changed for those slabs, so the global
	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			global_flags = slub_debug;
		slub_debug_string = saved_str;
	}
out:
	slub_debug = global_flags;
	if (slub_debug & SLAB_STORE_USER)
		stack_depot_request_early_init();
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	else
		static_branch_disable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

__setup("slab_debug", setup_slub_debug);
__setup_param("slub_debug", slub_debug, setup_slub_debug, 0);

/*
 * kmem_cache_flags - apply debugging options to the cache
 * @flags: flags to set
 * @name: name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the selected slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	if (flags & SLAB_NO_USER_FLAGS)
		return flags;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
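	 *
	 * (Illustrative example, not part of the original comment: booting
	 * with "slab_debug=U,dentry" applies SLAB_STORE_USER only to caches
	 * whose name matches "dentry"; the option letters are parsed by
	 * parse_slub_debug_flags() above.)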
1943 */ 1944 if (flags & SLAB_NOLEAKTRACE) 1945 slub_debug_local &= ~SLAB_STORE_USER; 1946 1947 len = strlen(name); 1948 next_block = slub_debug_string; 1949 /* Go through all blocks of debug options, see if any matches our slab's name */ 1950 while (next_block) { 1951 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); 1952 if (!iter) 1953 continue; 1954 /* Found a block that has a slab list, search it */ 1955 while (*iter) { 1956 char *end, *glob; 1957 size_t cmplen; 1958 1959 end = strchrnul(iter, ','); 1960 if (next_block && next_block < end) 1961 end = next_block - 1; 1962 1963 glob = strnchr(iter, end - iter, '*'); 1964 if (glob) 1965 cmplen = glob - iter; 1966 else 1967 cmplen = max_t(size_t, len, (end - iter)); 1968 1969 if (!strncmp(name, iter, cmplen)) { 1970 flags |= block_flags; 1971 return flags; 1972 } 1973 1974 if (!*end || *end == ';') 1975 break; 1976 iter = end + 1; 1977 } 1978 } 1979 1980 return flags | slub_debug_local; 1981 } 1982 #else /* !CONFIG_SLUB_DEBUG */ 1983 static inline void setup_object_debug(struct kmem_cache *s, void *object) {} 1984 static inline 1985 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} 1986 1987 static inline bool alloc_debug_processing(struct kmem_cache *s, 1988 struct slab *slab, void *object, int orig_size) { return true; } 1989 1990 static inline bool free_debug_processing(struct kmem_cache *s, 1991 struct slab *slab, void *head, void *tail, int *bulk_cnt, 1992 unsigned long addr, depot_stack_handle_t handle) { return true; } 1993 1994 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} 1995 static inline int check_object(struct kmem_cache *s, struct slab *slab, 1996 void *object, u8 val) { return 1; } 1997 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; } 1998 static inline void set_track(struct kmem_cache *s, void *object, 1999 enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {} 2000 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 2001 struct slab *slab) {} 2002 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 2003 struct slab *slab) {} 2004 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) 2005 { 2006 return flags; 2007 } 2008 #define slub_debug 0 2009 2010 #define disable_higher_order_debug 0 2011 2012 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 2013 { return 0; } 2014 static inline void inc_slabs_node(struct kmem_cache *s, int node, 2015 int objects) {} 2016 static inline void dec_slabs_node(struct kmem_cache *s, int node, 2017 int objects) {} 2018 #ifndef CONFIG_SLUB_TINY 2019 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 2020 void **freelist, void *nextfree) 2021 { 2022 return false; 2023 } 2024 #endif 2025 #endif /* CONFIG_SLUB_DEBUG */ 2026 2027 #ifdef CONFIG_SLAB_OBJ_EXT 2028 2029 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG 2030 2031 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) 2032 { 2033 struct slabobj_ext *slab_exts; 2034 struct slab *obj_exts_slab; 2035 2036 obj_exts_slab = virt_to_slab(obj_exts); 2037 slab_exts = slab_obj_exts(obj_exts_slab); 2038 if (slab_exts) { 2039 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache, 2040 obj_exts_slab, obj_exts); 2041 /* codetag should be NULL */ 2042 WARN_ON(slab_exts[offs].ref.ct); 2043 set_codetag_empty(&slab_exts[offs].ref); 2044 } 2045 } 2046 2047 static inline void mark_failed_objexts_alloc(struct slab 
*slab)
2048 {
2049 slab->obj_exts = OBJEXTS_ALLOC_FAIL;
2050 }
2051
2052 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2053 struct slabobj_ext *vec, unsigned int objects)
2054 {
2055 /*
2056 * If vector previously failed to allocate then we have live
2057 * objects with no tag reference. Mark all references in this
2058 * vector as empty to avoid warnings later on.
2059 */
2060 if (obj_exts == OBJEXTS_ALLOC_FAIL) {
2061 unsigned int i;
2062
2063 for (i = 0; i < objects; i++)
2064 set_codetag_empty(&vec[i].ref);
2065 }
2066 }
2067
2068 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2069
2070 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
2071 static inline void mark_failed_objexts_alloc(struct slab *slab) {}
2072 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2073 struct slabobj_ext *vec, unsigned int objects) {}
2074
2075 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
2076
2077 /*
2078 * The allocated objcg pointers array is not accounted directly.
2079 * Moreover, it should not come from a DMA buffer and is not readily
2080 * reclaimable. So those GFP bits should be masked off.
2081 */
2082 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
2083 __GFP_ACCOUNT | __GFP_NOFAIL)
2084
2085 static inline void init_slab_obj_exts(struct slab *slab)
2086 {
2087 slab->obj_exts = 0;
2088 }
2089
2090 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2091 gfp_t gfp, bool new_slab)
2092 {
2093 bool allow_spin = gfpflags_allow_spinning(gfp);
2094 unsigned int objects = objs_per_slab(s, slab);
2095 unsigned long new_exts;
2096 unsigned long old_exts;
2097 struct slabobj_ext *vec;
2098
2099 gfp &= ~OBJCGS_CLEAR_MASK;
2100 /* Prevent recursive extension vector allocation */
2101 gfp |= __GFP_NO_OBJ_EXT;
2102
2103 /*
2104 * Note that allow_spin may be false during early boot because of the
2105 * restricted GFP_BOOT_MASK. Since kmalloc_nolock() only supports
2106 * architectures with cmpxchg16b, early obj_exts will be missing for
2107 * very early allocations on architectures without it.
2108 */
2109 if (unlikely(!allow_spin)) {
2110 size_t sz = objects * sizeof(struct slabobj_ext);
2111
2112 vec = kmalloc_nolock(sz, __GFP_ZERO | __GFP_NO_OBJ_EXT,
2113 slab_nid(slab));
2114 } else {
2115 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
2116 slab_nid(slab));
2117 }
2118 if (!vec) {
2119 /* Mark vectors which failed to allocate */
2120 mark_failed_objexts_alloc(slab);
2121
2122 return -ENOMEM;
2123 }
2124
2125 new_exts = (unsigned long)vec;
2126 if (unlikely(!allow_spin))
2127 new_exts |= OBJEXTS_NOSPIN_ALLOC;
2128 #ifdef CONFIG_MEMCG
2129 new_exts |= MEMCG_DATA_OBJEXTS;
2130 #endif
2131 old_exts = READ_ONCE(slab->obj_exts);
2132 handle_failed_objexts_alloc(old_exts, vec, objects);
2133 if (new_slab) {
2134 /*
2135 * If the slab is brand new and nobody can yet access its
2136 * obj_exts, no synchronization is required and obj_exts can
2137 * be simply assigned.
2138 */
2139 slab->obj_exts = new_exts;
2140 } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
2141 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
2142 /*
2143 * If the slab is already in use, somebody can allocate and
2144 * assign slabobj_exts in parallel. In this case the existing
2145 * objcg vector should be reused.
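 * We get here either because old_exts already carried a vector
 * pointer or because the cmpxchg() above lost the race; in both
 * cases drop the vector allocated here and keep the existing one.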
2146 */ 2147 mark_objexts_empty(vec); 2148 if (unlikely(!allow_spin)) 2149 kfree_nolock(vec); 2150 else 2151 kfree(vec); 2152 return 0; 2153 } 2154 2155 if (allow_spin) 2156 kmemleak_not_leak(vec); 2157 return 0; 2158 } 2159 2160 static inline void free_slab_obj_exts(struct slab *slab) 2161 { 2162 struct slabobj_ext *obj_exts; 2163 2164 obj_exts = slab_obj_exts(slab); 2165 if (!obj_exts) 2166 return; 2167 2168 /* 2169 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its 2170 * corresponding extension will be NULL. alloc_tag_sub() will throw a 2171 * warning if slab has extensions but the extension of an object is 2172 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that 2173 * the extension for obj_exts is expected to be NULL. 2174 */ 2175 mark_objexts_empty(obj_exts); 2176 if (unlikely(READ_ONCE(slab->obj_exts) & OBJEXTS_NOSPIN_ALLOC)) 2177 kfree_nolock(obj_exts); 2178 else 2179 kfree(obj_exts); 2180 slab->obj_exts = 0; 2181 } 2182 2183 #else /* CONFIG_SLAB_OBJ_EXT */ 2184 2185 static inline void init_slab_obj_exts(struct slab *slab) 2186 { 2187 } 2188 2189 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 2190 gfp_t gfp, bool new_slab) 2191 { 2192 return 0; 2193 } 2194 2195 static inline void free_slab_obj_exts(struct slab *slab) 2196 { 2197 } 2198 2199 #endif /* CONFIG_SLAB_OBJ_EXT */ 2200 2201 #ifdef CONFIG_MEM_ALLOC_PROFILING 2202 2203 static inline struct slabobj_ext * 2204 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 2205 { 2206 struct slab *slab; 2207 2208 slab = virt_to_slab(p); 2209 if (!slab_obj_exts(slab) && 2210 alloc_slab_obj_exts(slab, s, flags, false)) { 2211 pr_warn_once("%s, %s: Failed to create slab extension vector!\n", 2212 __func__, s->name); 2213 return NULL; 2214 } 2215 2216 return slab_obj_exts(slab) + obj_to_index(s, slab, p); 2217 } 2218 2219 /* Should be called only if mem_alloc_profiling_enabled() */ 2220 static noinline void 2221 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2222 { 2223 struct slabobj_ext *obj_exts; 2224 2225 if (!object) 2226 return; 2227 2228 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2229 return; 2230 2231 if (flags & __GFP_NO_OBJ_EXT) 2232 return; 2233 2234 obj_exts = prepare_slab_obj_exts_hook(s, flags, object); 2235 /* 2236 * Currently obj_exts is used only for allocation profiling. 2237 * If other users appear then mem_alloc_profiling_enabled() 2238 * check should be added before alloc_tag_add(). 2239 */ 2240 if (likely(obj_exts)) 2241 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); 2242 else 2243 alloc_tag_set_inaccurate(current->alloc_tag); 2244 } 2245 2246 static inline void 2247 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2248 { 2249 if (mem_alloc_profiling_enabled()) 2250 __alloc_tagging_slab_alloc_hook(s, object, flags); 2251 } 2252 2253 /* Should be called only if mem_alloc_profiling_enabled() */ 2254 static noinline void 2255 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2256 int objects) 2257 { 2258 struct slabobj_ext *obj_exts; 2259 int i; 2260 2261 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. 
*/ 2262 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2263 return; 2264 2265 obj_exts = slab_obj_exts(slab); 2266 if (!obj_exts) 2267 return; 2268 2269 for (i = 0; i < objects; i++) { 2270 unsigned int off = obj_to_index(s, slab, p[i]); 2271 2272 alloc_tag_sub(&obj_exts[off].ref, s->size); 2273 } 2274 } 2275 2276 static inline void 2277 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2278 int objects) 2279 { 2280 if (mem_alloc_profiling_enabled()) 2281 __alloc_tagging_slab_free_hook(s, slab, p, objects); 2282 } 2283 2284 #else /* CONFIG_MEM_ALLOC_PROFILING */ 2285 2286 static inline void 2287 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2288 { 2289 } 2290 2291 static inline void 2292 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2293 int objects) 2294 { 2295 } 2296 2297 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 2298 2299 2300 #ifdef CONFIG_MEMCG 2301 2302 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object); 2303 2304 static __fastpath_inline 2305 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 2306 gfp_t flags, size_t size, void **p) 2307 { 2308 if (likely(!memcg_kmem_online())) 2309 return true; 2310 2311 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) 2312 return true; 2313 2314 if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p))) 2315 return true; 2316 2317 if (likely(size == 1)) { 2318 memcg_alloc_abort_single(s, *p); 2319 *p = NULL; 2320 } else { 2321 kmem_cache_free_bulk(s, size, p); 2322 } 2323 2324 return false; 2325 } 2326 2327 static __fastpath_inline 2328 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2329 int objects) 2330 { 2331 struct slabobj_ext *obj_exts; 2332 2333 if (!memcg_kmem_online()) 2334 return; 2335 2336 obj_exts = slab_obj_exts(slab); 2337 if (likely(!obj_exts)) 2338 return; 2339 2340 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); 2341 } 2342 2343 static __fastpath_inline 2344 bool memcg_slab_post_charge(void *p, gfp_t flags) 2345 { 2346 struct slabobj_ext *slab_exts; 2347 struct kmem_cache *s; 2348 struct folio *folio; 2349 struct slab *slab; 2350 unsigned long off; 2351 2352 folio = virt_to_folio(p); 2353 if (!folio_test_slab(folio)) { 2354 int size; 2355 2356 if (folio_memcg_kmem(folio)) 2357 return true; 2358 2359 if (__memcg_kmem_charge_page(folio_page(folio, 0), flags, 2360 folio_order(folio))) 2361 return false; 2362 2363 /* 2364 * This folio has already been accounted in the global stats but 2365 * not in the memcg stats. So, subtract from the global and use 2366 * the interface which adds to both global and memcg stats. 2367 */ 2368 size = folio_size(folio); 2369 node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size); 2370 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size); 2371 return true; 2372 } 2373 2374 slab = folio_slab(folio); 2375 s = slab->slab_cache; 2376 2377 /* 2378 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency 2379 * of slab_obj_exts being allocated from the same slab and thus the slab 2380 * becoming effectively unfreeable. 2381 */ 2382 if (is_kmalloc_normal(s)) 2383 return true; 2384 2385 /* Ignore already charged objects. 
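 * (their slabobj_ext already has an objcg set, see the check below)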
*/ 2386 slab_exts = slab_obj_exts(slab); 2387 if (slab_exts) { 2388 off = obj_to_index(s, slab, p); 2389 if (unlikely(slab_exts[off].objcg)) 2390 return true; 2391 } 2392 2393 return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p); 2394 } 2395 2396 #else /* CONFIG_MEMCG */ 2397 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s, 2398 struct list_lru *lru, 2399 gfp_t flags, size_t size, 2400 void **p) 2401 { 2402 return true; 2403 } 2404 2405 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 2406 void **p, int objects) 2407 { 2408 } 2409 2410 static inline bool memcg_slab_post_charge(void *p, gfp_t flags) 2411 { 2412 return true; 2413 } 2414 #endif /* CONFIG_MEMCG */ 2415 2416 #ifdef CONFIG_SLUB_RCU_DEBUG 2417 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head); 2418 2419 struct rcu_delayed_free { 2420 struct rcu_head head; 2421 void *object; 2422 }; 2423 #endif 2424 2425 /* 2426 * Hooks for other subsystems that check memory allocations. In a typical 2427 * production configuration these hooks all should produce no code at all. 2428 * 2429 * Returns true if freeing of the object can proceed, false if its reuse 2430 * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned 2431 * to KFENCE. 2432 */ 2433 static __always_inline 2434 bool slab_free_hook(struct kmem_cache *s, void *x, bool init, 2435 bool after_rcu_delay) 2436 { 2437 /* Are the object contents still accessible? */ 2438 bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay; 2439 2440 kmemleak_free_recursive(x, s->flags); 2441 kmsan_slab_free(s, x); 2442 2443 debug_check_no_locks_freed(x, s->object_size); 2444 2445 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 2446 debug_check_no_obj_freed(x, s->object_size); 2447 2448 /* Use KCSAN to help debug racy use-after-free. */ 2449 if (!still_accessible) 2450 __kcsan_check_access(x, s->object_size, 2451 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 2452 2453 if (kfence_free(x)) 2454 return false; 2455 2456 /* 2457 * Give KASAN a chance to notice an invalid free operation before we 2458 * modify the object. 2459 */ 2460 if (kasan_slab_pre_free(s, x)) 2461 return false; 2462 2463 #ifdef CONFIG_SLUB_RCU_DEBUG 2464 if (still_accessible) { 2465 struct rcu_delayed_free *delayed_free; 2466 2467 delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT); 2468 if (delayed_free) { 2469 /* 2470 * Let KASAN track our call stack as a "related work 2471 * creation", just like if the object had been freed 2472 * normally via kfree_rcu(). 2473 * We have to do this manually because the rcu_head is 2474 * not located inside the object. 2475 */ 2476 kasan_record_aux_stack(x); 2477 2478 delayed_free->object = x; 2479 call_rcu(&delayed_free->head, slab_free_after_rcu_debug); 2480 return false; 2481 } 2482 } 2483 #endif /* CONFIG_SLUB_RCU_DEBUG */ 2484 2485 /* 2486 * As memory initialization might be integrated into KASAN, 2487 * kasan_slab_free and initialization memset's must be 2488 * kept together to avoid discrepancies in behavior. 2489 * 2490 * The initialization memset's clear the object and the metadata, 2491 * but don't touch the SLAB redzone. 2492 * 2493 * The object's freepointer is also avoided if stored outside the 2494 * object. 
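 *
 * Concretely (sketch of the code below): the object's first orig_size
 * bytes are zeroed unless KASAN's integrated init covers that, the
 * metadata area starting at get_info_end() is zeroed separately
 * (minus any red-zone padding), and orig_size is restored afterwards
 * so the kmalloc redzone check keeps working.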
2495 */
2496 if (unlikely(init)) {
2497 int rsize;
2498 unsigned int inuse, orig_size;
2499
2500 inuse = get_info_end(s);
2501 orig_size = get_orig_size(s, x);
2502 if (!kasan_has_integrated_init())
2503 memset(kasan_reset_tag(x), 0, orig_size);
2504 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2505 memset((char *)kasan_reset_tag(x) + inuse, 0,
2506 s->size - inuse - rsize);
2507 /*
2508 * Restore orig_size, otherwise a kmalloc redzone overwrite
2509 * would be reported.
2510 */
2511 set_orig_size(s, x, orig_size);
2512
2513 }
2514 /* KASAN might put x into memory quarantine, delaying its reuse. */
2515 return !kasan_slab_free(s, x, init, still_accessible, false);
2516 }
2517
2518 static __fastpath_inline
2519 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
2520 int *cnt)
2521 {
2522
2523 void *object;
2524 void *next = *head;
2525 void *old_tail = *tail;
2526 bool init;
2527
2528 if (is_kfence_address(next)) {
2529 slab_free_hook(s, next, false, false);
2530 return false;
2531 }
2532
2533 /* Head and tail of the reconstructed freelist */
2534 *head = NULL;
2535 *tail = NULL;
2536
2537 init = slab_want_init_on_free(s);
2538
2539 do {
2540 object = next;
2541 next = get_freepointer(s, object);
2542
2543 /* If object's reuse doesn't have to be delayed */
2544 if (likely(slab_free_hook(s, object, init, false))) {
2545 /* Move object to the new freelist */
2546 set_freepointer(s, object, *head);
2547 *head = object;
2548 if (!*tail)
2549 *tail = object;
2550 } else {
2551 /*
2552 * Adjust the reconstructed freelist depth
2553 * accordingly if object's reuse is delayed.
2554 */
2555 --(*cnt);
2556 }
2557 } while (object != old_tail);
2558
2559 return *head != NULL;
2560 }
2561
2562 static void *setup_object(struct kmem_cache *s, void *object)
2563 {
2564 setup_object_debug(s, object);
2565 object = kasan_init_slab_obj(s, object);
2566 if (unlikely(s->ctor)) {
2567 kasan_unpoison_new_object(s, object);
2568 s->ctor(object);
2569 kasan_poison_new_object(s, object);
2570 }
2571 return object;
2572 }
2573
2574 static struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp)
2575 {
2576 struct slab_sheaf *sheaf = kzalloc(struct_size(sheaf, objects,
2577 s->sheaf_capacity), gfp);
2578
2579 if (unlikely(!sheaf))
2580 return NULL;
2581
2582 sheaf->cache = s;
2583
2584 stat(s, SHEAF_ALLOC);
2585
2586 return sheaf;
2587 }
2588
2589 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
2590 {
2591 kfree(sheaf);
2592
2593 stat(s, SHEAF_FREE);
2594 }
2595
2596 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
2597 size_t size, void **p);
2598
2599
2600 static int refill_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf,
2601 gfp_t gfp)
2602 {
2603 int to_fill = s->sheaf_capacity - sheaf->size;
2604 int filled;
2605
2606 if (!to_fill)
2607 return 0;
2608
2609 filled = __kmem_cache_alloc_bulk(s, gfp, to_fill,
2610 &sheaf->objects[sheaf->size]);
2611
2612 sheaf->size += filled;
2613
2614 stat_add(s, SHEAF_REFILL, filled);
2615
2616 if (filled < to_fill)
2617 return -ENOMEM;
2618
2619 return 0;
2620 }
2621
2622
2623 static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
2624 {
2625 struct slab_sheaf *sheaf = alloc_empty_sheaf(s, gfp);
2626
2627 if (!sheaf)
2628 return NULL;
2629
2630 if (refill_sheaf(s, sheaf, gfp)) {
2631 free_empty_sheaf(s, sheaf);
2632 return NULL;
2633 }
2634
2635 return sheaf;
2636 }
2637
2638 /*
2639 * Maximum number of objects freed during a single flush of main pcs sheaf.
2640 * Translates directly to an on-stack array size.
2641 */
2642 #define PCS_BATCH_MAX 32U
2643
2644 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
2645
2646 /*
2647 * Free all objects from the main sheaf. In order to perform
2648 * __kmem_cache_free_bulk() outside of cpu_sheaves->lock, work in batches where
2649 * object pointers are moved to an on-stack array under the lock. To bound the
2650 * stack usage, limit each batch to PCS_BATCH_MAX.
2651 *
2652 * returns true if at least partially flushed
2653 */
2654 static bool sheaf_flush_main(struct kmem_cache *s)
2655 {
2656 struct slub_percpu_sheaves *pcs;
2657 unsigned int batch, remaining;
2658 void *objects[PCS_BATCH_MAX];
2659 struct slab_sheaf *sheaf;
2660 bool ret = false;
2661
2662 next_batch:
2663 if (!local_trylock(&s->cpu_sheaves->lock))
2664 return ret;
2665
2666 pcs = this_cpu_ptr(s->cpu_sheaves);
2667 sheaf = pcs->main;
2668
2669 batch = min(PCS_BATCH_MAX, sheaf->size);
2670
2671 sheaf->size -= batch;
2672 memcpy(objects, sheaf->objects + sheaf->size, batch * sizeof(void *));
2673
2674 remaining = sheaf->size;
2675
2676 local_unlock(&s->cpu_sheaves->lock);
2677
2678 __kmem_cache_free_bulk(s, batch, &objects[0]);
2679
2680 stat_add(s, SHEAF_FLUSH, batch);
2681
2682 ret = true;
2683
2684 if (remaining)
2685 goto next_batch;
2686
2687 return ret;
2688 }
2689
2690 /*
2691 * Free all objects from a sheaf that's unused, i.e. not linked to any
2692 * cpu_sheaves, so no locking or batching is needed. The locking is also not
2693 * necessary when flushing a cpu's sheaves (both spare and main) during cpu
2694 * hotremove, as the cpu is not executing anymore.
2695 */
2696 static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf)
2697 {
2698 if (!sheaf->size)
2699 return;
2700
2701 stat_add(s, SHEAF_FLUSH, sheaf->size);
2702
2703 __kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
2704
2705 sheaf->size = 0;
2706 }
2707
2708 static void __rcu_free_sheaf_prepare(struct kmem_cache *s,
2709 struct slab_sheaf *sheaf)
2710 {
2711 bool init = slab_want_init_on_free(s);
2712 void **p = &sheaf->objects[0];
2713 unsigned int i = 0;
2714
2715 while (i < sheaf->size) {
2716 struct slab *slab = virt_to_slab(p[i]);
2717
2718 memcg_slab_free_hook(s, slab, p + i, 1);
2719 alloc_tagging_slab_free_hook(s, slab, p + i, 1);
2720
2721 if (unlikely(!slab_free_hook(s, p[i], init, true))) {
2722 p[i] = p[--sheaf->size];
2723 continue;
2724 }
2725
2726 i++;
2727 }
2728 }
2729
2730 static void rcu_free_sheaf_nobarn(struct rcu_head *head)
2731 {
2732 struct slab_sheaf *sheaf;
2733 struct kmem_cache *s;
2734
2735 sheaf = container_of(head, struct slab_sheaf, rcu_head);
2736 s = sheaf->cache;
2737
2738 __rcu_free_sheaf_prepare(s, sheaf);
2739
2740 sheaf_flush_unused(s, sheaf);
2741
2742 free_empty_sheaf(s, sheaf);
2743 }
2744
2745 /*
2746 * Caller needs to make sure migration is disabled in order to fully flush
2747 * a single cpu's sheaves.
2748 *
2749 * must not be called from an irq
2750 *
2751 * flushing operations are rare so let's keep it simple and flush to slabs
2752 * directly, skipping the barn
2753 */
2754 static void pcs_flush_all(struct kmem_cache *s)
2755 {
2756 struct slub_percpu_sheaves *pcs;
2757 struct slab_sheaf *spare, *rcu_free;
2758
2759 local_lock(&s->cpu_sheaves->lock);
2760 pcs = this_cpu_ptr(s->cpu_sheaves);
2761
2762 spare = pcs->spare;
2763 pcs->spare = NULL;
2764
2765 rcu_free = pcs->rcu_free;
2766 pcs->rcu_free = NULL;
2767
2768 local_unlock(&s->cpu_sheaves->lock);
2769
2770
if (spare) { 2771 sheaf_flush_unused(s, spare); 2772 free_empty_sheaf(s, spare); 2773 } 2774 2775 if (rcu_free) 2776 call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn); 2777 2778 sheaf_flush_main(s); 2779 } 2780 2781 static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu) 2782 { 2783 struct slub_percpu_sheaves *pcs; 2784 2785 pcs = per_cpu_ptr(s->cpu_sheaves, cpu); 2786 2787 /* The cpu is not executing anymore so we don't need pcs->lock */ 2788 sheaf_flush_unused(s, pcs->main); 2789 if (pcs->spare) { 2790 sheaf_flush_unused(s, pcs->spare); 2791 free_empty_sheaf(s, pcs->spare); 2792 pcs->spare = NULL; 2793 } 2794 2795 if (pcs->rcu_free) { 2796 call_rcu(&pcs->rcu_free->rcu_head, rcu_free_sheaf_nobarn); 2797 pcs->rcu_free = NULL; 2798 } 2799 } 2800 2801 static void pcs_destroy(struct kmem_cache *s) 2802 { 2803 int cpu; 2804 2805 for_each_possible_cpu(cpu) { 2806 struct slub_percpu_sheaves *pcs; 2807 2808 pcs = per_cpu_ptr(s->cpu_sheaves, cpu); 2809 2810 /* can happen when unwinding failed create */ 2811 if (!pcs->main) 2812 continue; 2813 2814 /* 2815 * We have already passed __kmem_cache_shutdown() so everything 2816 * was flushed and there should be no objects allocated from 2817 * slabs, otherwise kmem_cache_destroy() would have aborted. 2818 * Therefore something would have to be really wrong if the 2819 * warnings here trigger, and we should rather leave objects and 2820 * sheaves to leak in that case. 2821 */ 2822 2823 WARN_ON(pcs->spare); 2824 WARN_ON(pcs->rcu_free); 2825 2826 if (!WARN_ON(pcs->main->size)) { 2827 free_empty_sheaf(s, pcs->main); 2828 pcs->main = NULL; 2829 } 2830 } 2831 2832 free_percpu(s->cpu_sheaves); 2833 s->cpu_sheaves = NULL; 2834 } 2835 2836 static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn) 2837 { 2838 struct slab_sheaf *empty = NULL; 2839 unsigned long flags; 2840 2841 if (!data_race(barn->nr_empty)) 2842 return NULL; 2843 2844 spin_lock_irqsave(&barn->lock, flags); 2845 2846 if (likely(barn->nr_empty)) { 2847 empty = list_first_entry(&barn->sheaves_empty, 2848 struct slab_sheaf, barn_list); 2849 list_del(&empty->barn_list); 2850 barn->nr_empty--; 2851 } 2852 2853 spin_unlock_irqrestore(&barn->lock, flags); 2854 2855 return empty; 2856 } 2857 2858 /* 2859 * The following two functions are used mainly in cases where we have to undo an 2860 * intended action due to a race or cpu migration. Thus they do not check the 2861 * empty or full sheaf limits for simplicity. 
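 *
 * For example (illustrative): a cpu migration right after an empty
 * sheaf was taken from the barn can make it unneeded, in which case
 * it is simply put back via barn_put_empty_sheaf().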
2862 */ 2863 2864 static void barn_put_empty_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf) 2865 { 2866 unsigned long flags; 2867 2868 spin_lock_irqsave(&barn->lock, flags); 2869 2870 list_add(&sheaf->barn_list, &barn->sheaves_empty); 2871 barn->nr_empty++; 2872 2873 spin_unlock_irqrestore(&barn->lock, flags); 2874 } 2875 2876 static void barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf) 2877 { 2878 unsigned long flags; 2879 2880 spin_lock_irqsave(&barn->lock, flags); 2881 2882 list_add(&sheaf->barn_list, &barn->sheaves_full); 2883 barn->nr_full++; 2884 2885 spin_unlock_irqrestore(&barn->lock, flags); 2886 } 2887 2888 static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn) 2889 { 2890 struct slab_sheaf *sheaf = NULL; 2891 unsigned long flags; 2892 2893 if (!data_race(barn->nr_full) && !data_race(barn->nr_empty)) 2894 return NULL; 2895 2896 spin_lock_irqsave(&barn->lock, flags); 2897 2898 if (barn->nr_full) { 2899 sheaf = list_first_entry(&barn->sheaves_full, struct slab_sheaf, 2900 barn_list); 2901 list_del(&sheaf->barn_list); 2902 barn->nr_full--; 2903 } else if (barn->nr_empty) { 2904 sheaf = list_first_entry(&barn->sheaves_empty, 2905 struct slab_sheaf, barn_list); 2906 list_del(&sheaf->barn_list); 2907 barn->nr_empty--; 2908 } 2909 2910 spin_unlock_irqrestore(&barn->lock, flags); 2911 2912 return sheaf; 2913 } 2914 2915 /* 2916 * If a full sheaf is available, return it and put the supplied empty one to 2917 * barn. We ignore the limit on empty sheaves as the number of sheaves doesn't 2918 * change. 2919 */ 2920 static struct slab_sheaf * 2921 barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty) 2922 { 2923 struct slab_sheaf *full = NULL; 2924 unsigned long flags; 2925 2926 if (!data_race(barn->nr_full)) 2927 return NULL; 2928 2929 spin_lock_irqsave(&barn->lock, flags); 2930 2931 if (likely(barn->nr_full)) { 2932 full = list_first_entry(&barn->sheaves_full, struct slab_sheaf, 2933 barn_list); 2934 list_del(&full->barn_list); 2935 list_add(&empty->barn_list, &barn->sheaves_empty); 2936 barn->nr_full--; 2937 barn->nr_empty++; 2938 } 2939 2940 spin_unlock_irqrestore(&barn->lock, flags); 2941 2942 return full; 2943 } 2944 2945 /* 2946 * If an empty sheaf is available, return it and put the supplied full one to 2947 * barn. But if there are too many full sheaves, reject this with -E2BIG. 
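 * Returns ERR_PTR(-ENOMEM) when the barn has no empty sheaf to hand back.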
2948 */ 2949 static struct slab_sheaf * 2950 barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full) 2951 { 2952 struct slab_sheaf *empty; 2953 unsigned long flags; 2954 2955 /* we don't repeat this check under barn->lock as it's not critical */ 2956 if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES) 2957 return ERR_PTR(-E2BIG); 2958 if (!data_race(barn->nr_empty)) 2959 return ERR_PTR(-ENOMEM); 2960 2961 spin_lock_irqsave(&barn->lock, flags); 2962 2963 if (likely(barn->nr_empty)) { 2964 empty = list_first_entry(&barn->sheaves_empty, struct slab_sheaf, 2965 barn_list); 2966 list_del(&empty->barn_list); 2967 list_add(&full->barn_list, &barn->sheaves_full); 2968 barn->nr_empty--; 2969 barn->nr_full++; 2970 } else { 2971 empty = ERR_PTR(-ENOMEM); 2972 } 2973 2974 spin_unlock_irqrestore(&barn->lock, flags); 2975 2976 return empty; 2977 } 2978 2979 static void barn_init(struct node_barn *barn) 2980 { 2981 spin_lock_init(&barn->lock); 2982 INIT_LIST_HEAD(&barn->sheaves_full); 2983 INIT_LIST_HEAD(&barn->sheaves_empty); 2984 barn->nr_full = 0; 2985 barn->nr_empty = 0; 2986 } 2987 2988 static void barn_shrink(struct kmem_cache *s, struct node_barn *barn) 2989 { 2990 struct list_head empty_list; 2991 struct list_head full_list; 2992 struct slab_sheaf *sheaf, *sheaf2; 2993 unsigned long flags; 2994 2995 INIT_LIST_HEAD(&empty_list); 2996 INIT_LIST_HEAD(&full_list); 2997 2998 spin_lock_irqsave(&barn->lock, flags); 2999 3000 list_splice_init(&barn->sheaves_full, &full_list); 3001 barn->nr_full = 0; 3002 list_splice_init(&barn->sheaves_empty, &empty_list); 3003 barn->nr_empty = 0; 3004 3005 spin_unlock_irqrestore(&barn->lock, flags); 3006 3007 list_for_each_entry_safe(sheaf, sheaf2, &full_list, barn_list) { 3008 sheaf_flush_unused(s, sheaf); 3009 free_empty_sheaf(s, sheaf); 3010 } 3011 3012 list_for_each_entry_safe(sheaf, sheaf2, &empty_list, barn_list) 3013 free_empty_sheaf(s, sheaf); 3014 } 3015 3016 /* 3017 * Slab allocation and freeing 3018 */ 3019 static inline struct slab *alloc_slab_page(gfp_t flags, int node, 3020 struct kmem_cache_order_objects oo, 3021 bool allow_spin) 3022 { 3023 struct folio *folio; 3024 struct slab *slab; 3025 unsigned int order = oo_order(oo); 3026 3027 if (unlikely(!allow_spin)) 3028 folio = (struct folio *)alloc_frozen_pages_nolock(0/* __GFP_COMP is implied */, 3029 node, order); 3030 else if (node == NUMA_NO_NODE) 3031 folio = (struct folio *)alloc_frozen_pages(flags, order); 3032 else 3033 folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL); 3034 3035 if (!folio) 3036 return NULL; 3037 3038 slab = folio_slab(folio); 3039 __folio_set_slab(folio); 3040 if (folio_is_pfmemalloc(folio)) 3041 slab_set_pfmemalloc(slab); 3042 3043 return slab; 3044 } 3045 3046 #ifdef CONFIG_SLAB_FREELIST_RANDOM 3047 /* Pre-initialize the random sequence cache */ 3048 static int init_cache_random_seq(struct kmem_cache *s) 3049 { 3050 unsigned int count = oo_objects(s->oo); 3051 int err; 3052 3053 /* Bailout if already initialised */ 3054 if (s->random_seq) 3055 return 0; 3056 3057 err = cache_random_seq_create(s, count, GFP_KERNEL); 3058 if (err) { 3059 pr_err("SLUB: Unable to initialize free list for %s\n", 3060 s->name); 3061 return err; 3062 } 3063 3064 /* Transform to an offset on the set of pages */ 3065 if (s->random_seq) { 3066 unsigned int i; 3067 3068 for (i = 0; i < count; i++) 3069 s->random_seq[i] *= s->size; 3070 } 3071 return 0; 3072 } 3073 3074 /* Initialize each random sequence freelist per cache */ 3075 static void __init 
init_freelist_randomization(void) 3076 { 3077 struct kmem_cache *s; 3078 3079 mutex_lock(&slab_mutex); 3080 3081 list_for_each_entry(s, &slab_caches, list) 3082 init_cache_random_seq(s); 3083 3084 mutex_unlock(&slab_mutex); 3085 } 3086 3087 /* Get the next entry on the pre-computed freelist randomized */ 3088 static void *next_freelist_entry(struct kmem_cache *s, 3089 unsigned long *pos, void *start, 3090 unsigned long page_limit, 3091 unsigned long freelist_count) 3092 { 3093 unsigned int idx; 3094 3095 /* 3096 * If the target page allocation failed, the number of objects on the 3097 * page might be smaller than the usual size defined by the cache. 3098 */ 3099 do { 3100 idx = s->random_seq[*pos]; 3101 *pos += 1; 3102 if (*pos >= freelist_count) 3103 *pos = 0; 3104 } while (unlikely(idx >= page_limit)); 3105 3106 return (char *)start + idx; 3107 } 3108 3109 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 3110 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 3111 { 3112 void *start; 3113 void *cur; 3114 void *next; 3115 unsigned long idx, pos, page_limit, freelist_count; 3116 3117 if (slab->objects < 2 || !s->random_seq) 3118 return false; 3119 3120 freelist_count = oo_objects(s->oo); 3121 pos = get_random_u32_below(freelist_count); 3122 3123 page_limit = slab->objects * s->size; 3124 start = fixup_red_left(s, slab_address(slab)); 3125 3126 /* First entry is used as the base of the freelist */ 3127 cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count); 3128 cur = setup_object(s, cur); 3129 slab->freelist = cur; 3130 3131 for (idx = 1; idx < slab->objects; idx++) { 3132 next = next_freelist_entry(s, &pos, start, page_limit, 3133 freelist_count); 3134 next = setup_object(s, next); 3135 set_freepointer(s, cur, next); 3136 cur = next; 3137 } 3138 set_freepointer(s, cur, NULL); 3139 3140 return true; 3141 } 3142 #else 3143 static inline int init_cache_random_seq(struct kmem_cache *s) 3144 { 3145 return 0; 3146 } 3147 static inline void init_freelist_randomization(void) { } 3148 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 3149 { 3150 return false; 3151 } 3152 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 3153 3154 static __always_inline void account_slab(struct slab *slab, int order, 3155 struct kmem_cache *s, gfp_t gfp) 3156 { 3157 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) 3158 alloc_slab_obj_exts(slab, s, gfp, true); 3159 3160 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 3161 PAGE_SIZE << order); 3162 } 3163 3164 static __always_inline void unaccount_slab(struct slab *slab, int order, 3165 struct kmem_cache *s) 3166 { 3167 /* 3168 * The slab object extensions should now be freed regardless of 3169 * whether mem_alloc_profiling_enabled() or not because profiling 3170 * might have been disabled after slab->obj_exts got allocated. 
3171 */ 3172 free_slab_obj_exts(slab); 3173 3174 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 3175 -(PAGE_SIZE << order)); 3176 } 3177 3178 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 3179 { 3180 bool allow_spin = gfpflags_allow_spinning(flags); 3181 struct slab *slab; 3182 struct kmem_cache_order_objects oo = s->oo; 3183 gfp_t alloc_gfp; 3184 void *start, *p, *next; 3185 int idx; 3186 bool shuffle; 3187 3188 flags &= gfp_allowed_mask; 3189 3190 flags |= s->allocflags; 3191 3192 /* 3193 * Let the initial higher-order allocation fail under memory pressure 3194 * so we fall-back to the minimum order allocation. 3195 */ 3196 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 3197 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 3198 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; 3199 3200 /* 3201 * __GFP_RECLAIM could be cleared on the first allocation attempt, 3202 * so pass allow_spin flag directly. 3203 */ 3204 slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin); 3205 if (unlikely(!slab)) { 3206 oo = s->min; 3207 alloc_gfp = flags; 3208 /* 3209 * Allocation may have failed due to fragmentation. 3210 * Try a lower order alloc if possible 3211 */ 3212 slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin); 3213 if (unlikely(!slab)) 3214 return NULL; 3215 stat(s, ORDER_FALLBACK); 3216 } 3217 3218 slab->objects = oo_objects(oo); 3219 slab->inuse = 0; 3220 slab->frozen = 0; 3221 init_slab_obj_exts(slab); 3222 3223 account_slab(slab, oo_order(oo), s, flags); 3224 3225 slab->slab_cache = s; 3226 3227 kasan_poison_slab(slab); 3228 3229 start = slab_address(slab); 3230 3231 setup_slab_debug(s, slab, start); 3232 3233 shuffle = shuffle_freelist(s, slab); 3234 3235 if (!shuffle) { 3236 start = fixup_red_left(s, start); 3237 start = setup_object(s, start); 3238 slab->freelist = start; 3239 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 3240 next = p + s->size; 3241 next = setup_object(s, next); 3242 set_freepointer(s, p, next); 3243 p = next; 3244 } 3245 set_freepointer(s, p, NULL); 3246 } 3247 3248 return slab; 3249 } 3250 3251 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 3252 { 3253 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 3254 flags = kmalloc_fix_flags(flags); 3255 3256 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 3257 3258 return allocate_slab(s, 3259 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 3260 } 3261 3262 static void __free_slab(struct kmem_cache *s, struct slab *slab) 3263 { 3264 struct folio *folio = slab_folio(slab); 3265 int order = folio_order(folio); 3266 int pages = 1 << order; 3267 3268 __slab_clear_pfmemalloc(slab); 3269 folio->mapping = NULL; 3270 __folio_clear_slab(folio); 3271 mm_account_reclaimed_pages(pages); 3272 unaccount_slab(slab, order, s); 3273 free_frozen_pages(&folio->page, order); 3274 } 3275 3276 static void rcu_free_slab(struct rcu_head *h) 3277 { 3278 struct slab *slab = container_of(h, struct slab, rcu_head); 3279 3280 __free_slab(slab->slab_cache, slab); 3281 } 3282 3283 static void free_slab(struct kmem_cache *s, struct slab *slab) 3284 { 3285 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 3286 void *p; 3287 3288 slab_pad_check(s, slab); 3289 for_each_object(p, s, slab_address(slab), slab->objects) 3290 check_object(s, slab, p, SLUB_RED_INACTIVE); 3291 } 3292 3293 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) 3294 call_rcu(&slab->rcu_head, rcu_free_slab); 3295 else 3296 __free_slab(s, 
slab); 3297 } 3298 3299 static void discard_slab(struct kmem_cache *s, struct slab *slab) 3300 { 3301 dec_slabs_node(s, slab_nid(slab), slab->objects); 3302 free_slab(s, slab); 3303 } 3304 3305 static inline bool slab_test_node_partial(const struct slab *slab) 3306 { 3307 return test_bit(SL_partial, &slab->flags.f); 3308 } 3309 3310 static inline void slab_set_node_partial(struct slab *slab) 3311 { 3312 set_bit(SL_partial, &slab->flags.f); 3313 } 3314 3315 static inline void slab_clear_node_partial(struct slab *slab) 3316 { 3317 clear_bit(SL_partial, &slab->flags.f); 3318 } 3319 3320 /* 3321 * Management of partially allocated slabs. 3322 */ 3323 static inline void 3324 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 3325 { 3326 n->nr_partial++; 3327 if (tail == DEACTIVATE_TO_TAIL) 3328 list_add_tail(&slab->slab_list, &n->partial); 3329 else 3330 list_add(&slab->slab_list, &n->partial); 3331 slab_set_node_partial(slab); 3332 } 3333 3334 static inline void add_partial(struct kmem_cache_node *n, 3335 struct slab *slab, int tail) 3336 { 3337 lockdep_assert_held(&n->list_lock); 3338 __add_partial(n, slab, tail); 3339 } 3340 3341 static inline void remove_partial(struct kmem_cache_node *n, 3342 struct slab *slab) 3343 { 3344 lockdep_assert_held(&n->list_lock); 3345 list_del(&slab->slab_list); 3346 slab_clear_node_partial(slab); 3347 n->nr_partial--; 3348 } 3349 3350 /* 3351 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a 3352 * slab from the n->partial list. Remove only a single object from the slab, do 3353 * the alloc_debug_processing() checks and leave the slab on the list, or move 3354 * it to full list if it was the last free object. 3355 */ 3356 static void *alloc_single_from_partial(struct kmem_cache *s, 3357 struct kmem_cache_node *n, struct slab *slab, int orig_size) 3358 { 3359 void *object; 3360 3361 lockdep_assert_held(&n->list_lock); 3362 3363 #ifdef CONFIG_SLUB_DEBUG 3364 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3365 if (!validate_slab_ptr(slab)) { 3366 slab_err(s, slab, "Not a valid slab page"); 3367 return NULL; 3368 } 3369 } 3370 #endif 3371 3372 object = slab->freelist; 3373 slab->freelist = get_freepointer(s, object); 3374 slab->inuse++; 3375 3376 if (!alloc_debug_processing(s, slab, object, orig_size)) { 3377 remove_partial(n, slab); 3378 return NULL; 3379 } 3380 3381 if (slab->inuse == slab->objects) { 3382 remove_partial(n, slab); 3383 add_full(s, n, slab); 3384 } 3385 3386 return object; 3387 } 3388 3389 static void defer_deactivate_slab(struct slab *slab, void *flush_freelist); 3390 3391 /* 3392 * Called only for kmem_cache_debug() caches to allocate from a freshly 3393 * allocated slab. Allocate a single object instead of whole freelist 3394 * and put the slab to the partial (or full) list. 
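 *
 * Returns the allocated object, or NULL when the debug checks fail or
 * when the node's list_lock cannot be taken without spinning (in which
 * case deactivation of the new slab is deferred).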
3395 */ 3396 static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab, 3397 int orig_size, gfp_t gfpflags) 3398 { 3399 bool allow_spin = gfpflags_allow_spinning(gfpflags); 3400 int nid = slab_nid(slab); 3401 struct kmem_cache_node *n = get_node(s, nid); 3402 unsigned long flags; 3403 void *object; 3404 3405 if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) { 3406 /* Unlucky, discard newly allocated slab */ 3407 slab->frozen = 1; 3408 defer_deactivate_slab(slab, NULL); 3409 return NULL; 3410 } 3411 3412 object = slab->freelist; 3413 slab->freelist = get_freepointer(s, object); 3414 slab->inuse = 1; 3415 3416 if (!alloc_debug_processing(s, slab, object, orig_size)) { 3417 /* 3418 * It's not really expected that this would fail on a 3419 * freshly allocated slab, but a concurrent memory 3420 * corruption in theory could cause that. 3421 * Leak memory of allocated slab. 3422 */ 3423 if (!allow_spin) 3424 spin_unlock_irqrestore(&n->list_lock, flags); 3425 return NULL; 3426 } 3427 3428 if (allow_spin) 3429 spin_lock_irqsave(&n->list_lock, flags); 3430 3431 if (slab->inuse == slab->objects) 3432 add_full(s, n, slab); 3433 else 3434 add_partial(n, slab, DEACTIVATE_TO_HEAD); 3435 3436 inc_slabs_node(s, nid, slab->objects); 3437 spin_unlock_irqrestore(&n->list_lock, flags); 3438 3439 return object; 3440 } 3441 3442 #ifdef CONFIG_SLUB_CPU_PARTIAL 3443 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 3444 #else 3445 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 3446 int drain) { } 3447 #endif 3448 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 3449 3450 /* 3451 * Try to allocate a partial slab from a specific node. 3452 */ 3453 static struct slab *get_partial_node(struct kmem_cache *s, 3454 struct kmem_cache_node *n, 3455 struct partial_context *pc) 3456 { 3457 struct slab *slab, *slab2, *partial = NULL; 3458 unsigned long flags; 3459 unsigned int partial_slabs = 0; 3460 3461 /* 3462 * Racy check. If we mistakenly see no partial slabs then we 3463 * just allocate an empty slab. If we mistakenly try to get a 3464 * partial slab and there is none available then get_partial() 3465 * will return NULL. 3466 */ 3467 if (!n || !n->nr_partial) 3468 return NULL; 3469 3470 if (gfpflags_allow_spinning(pc->flags)) 3471 spin_lock_irqsave(&n->list_lock, flags); 3472 else if (!spin_trylock_irqsave(&n->list_lock, flags)) 3473 return NULL; 3474 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 3475 if (!pfmemalloc_match(slab, pc->flags)) 3476 continue; 3477 3478 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 3479 void *object = alloc_single_from_partial(s, n, slab, 3480 pc->orig_size); 3481 if (object) { 3482 partial = slab; 3483 pc->object = object; 3484 break; 3485 } 3486 continue; 3487 } 3488 3489 remove_partial(n, slab); 3490 3491 if (!partial) { 3492 partial = slab; 3493 stat(s, ALLOC_FROM_PARTIAL); 3494 3495 if ((slub_get_cpu_partial(s) == 0)) { 3496 break; 3497 } 3498 } else { 3499 put_cpu_partial(s, slab, 0); 3500 stat(s, CPU_PARTIAL_NODE); 3501 3502 if (++partial_slabs > slub_get_cpu_partial(s) / 2) { 3503 break; 3504 } 3505 } 3506 } 3507 spin_unlock_irqrestore(&n->list_lock, flags); 3508 return partial; 3509 } 3510 3511 /* 3512 * Get a slab from somewhere. Search in increasing NUMA distances. 
3513 */ 3514 static struct slab *get_any_partial(struct kmem_cache *s, 3515 struct partial_context *pc) 3516 { 3517 #ifdef CONFIG_NUMA 3518 struct zonelist *zonelist; 3519 struct zoneref *z; 3520 struct zone *zone; 3521 enum zone_type highest_zoneidx = gfp_zone(pc->flags); 3522 struct slab *slab; 3523 unsigned int cpuset_mems_cookie; 3524 3525 /* 3526 * The defrag ratio allows a configuration of the tradeoffs between 3527 * inter node defragmentation and node local allocations. A lower 3528 * defrag_ratio increases the tendency to do local allocations 3529 * instead of attempting to obtain partial slabs from other nodes. 3530 * 3531 * If the defrag_ratio is set to 0 then kmalloc() always 3532 * returns node local objects. If the ratio is higher then kmalloc() 3533 * may return off node objects because partial slabs are obtained 3534 * from other nodes and filled up. 3535 * 3536 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 3537 * (which makes defrag_ratio = 1000) then every (well almost) 3538 * allocation will first attempt to defrag slab caches on other nodes. 3539 * This means scanning over all nodes to look for partial slabs which 3540 * may be expensive if we do it every time we are trying to find a slab 3541 * with available objects. 3542 */ 3543 if (!s->remote_node_defrag_ratio || 3544 get_cycles() % 1024 > s->remote_node_defrag_ratio) 3545 return NULL; 3546 3547 do { 3548 cpuset_mems_cookie = read_mems_allowed_begin(); 3549 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); 3550 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 3551 struct kmem_cache_node *n; 3552 3553 n = get_node(s, zone_to_nid(zone)); 3554 3555 if (n && cpuset_zone_allowed(zone, pc->flags) && 3556 n->nr_partial > s->min_partial) { 3557 slab = get_partial_node(s, n, pc); 3558 if (slab) { 3559 /* 3560 * Don't check read_mems_allowed_retry() 3561 * here - if mems_allowed was updated in 3562 * parallel, that was a harmless race 3563 * between allocation and the cpuset 3564 * update 3565 */ 3566 return slab; 3567 } 3568 } 3569 } 3570 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 3571 #endif /* CONFIG_NUMA */ 3572 return NULL; 3573 } 3574 3575 /* 3576 * Get a partial slab, lock it and return it. 3577 */ 3578 static struct slab *get_partial(struct kmem_cache *s, int node, 3579 struct partial_context *pc) 3580 { 3581 struct slab *slab; 3582 int searchnode = node; 3583 3584 if (node == NUMA_NO_NODE) 3585 searchnode = numa_mem_id(); 3586 3587 slab = get_partial_node(s, get_node(s, searchnode), pc); 3588 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE))) 3589 return slab; 3590 3591 return get_any_partial(s, pc); 3592 } 3593 3594 #ifndef CONFIG_SLUB_TINY 3595 3596 #ifdef CONFIG_PREEMPTION 3597 /* 3598 * Calculate the next globally unique transaction for disambiguation 3599 * during cmpxchg. The transactions start with the cpu number and are then 3600 * incremented by CONFIG_NR_CPUS. 3601 */ 3602 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 3603 #else 3604 /* 3605 * No preemption supported therefore also no need to check for 3606 * different cpus. 
3607 */
3608 #define TID_STEP 1
3609 #endif /* CONFIG_PREEMPTION */
3610
3611 static inline unsigned long next_tid(unsigned long tid)
3612 {
3613 return tid + TID_STEP;
3614 }
3615
3616 #ifdef SLUB_DEBUG_CMPXCHG
3617 static inline unsigned int tid_to_cpu(unsigned long tid)
3618 {
3619 return tid % TID_STEP;
3620 }
3621
3622 static inline unsigned long tid_to_event(unsigned long tid)
3623 {
3624 return tid / TID_STEP;
3625 }
3626 #endif
3627
3628 static inline unsigned int init_tid(int cpu)
3629 {
3630 return cpu;
3631 }
3632
3633 static inline void note_cmpxchg_failure(const char *n,
3634 const struct kmem_cache *s, unsigned long tid)
3635 {
3636 #ifdef SLUB_DEBUG_CMPXCHG
3637 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
3638
3639 pr_info("%s %s: cmpxchg redo ", n, s->name);
3640
3641 if (IS_ENABLED(CONFIG_PREEMPTION) &&
3642 tid_to_cpu(tid) != tid_to_cpu(actual_tid)) {
3643 pr_warn("due to cpu change %d -> %d\n",
3644 tid_to_cpu(tid), tid_to_cpu(actual_tid));
3645 } else if (tid_to_event(tid) != tid_to_event(actual_tid)) {
3646 pr_warn("due to cpu running other code. Event %ld->%ld\n",
3647 tid_to_event(tid), tid_to_event(actual_tid));
3648 } else {
3649 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
3650 actual_tid, tid, next_tid(tid));
3651 }
3652 #endif
3653 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
3654 }
3655
3656 static void init_kmem_cache_cpus(struct kmem_cache *s)
3657 {
3658 #ifdef CONFIG_PREEMPT_RT
3659 /*
3660 * Register lockdep key for non-boot kmem caches to avoid
3661 * WARN_ON_ONCE(static_obj(key)) in lockdep_register_key()
3662 */
3663 bool finegrain_lockdep = !init_section_contains(s, 1);
3664 #else
3665 /*
3666 * Don't bother with different lockdep classes for each
3667 * kmem_cache, since we only use local_trylock_irqsave().
3668 */
3669 bool finegrain_lockdep = false;
3670 #endif
3671 int cpu;
3672 struct kmem_cache_cpu *c;
3673
3674 if (finegrain_lockdep)
3675 lockdep_register_key(&s->lock_key);
3676 for_each_possible_cpu(cpu) {
3677 c = per_cpu_ptr(s->cpu_slab, cpu);
3678 local_trylock_init(&c->lock);
3679 if (finegrain_lockdep)
3680 lockdep_set_class(&c->lock, &s->lock_key);
3681 c->tid = init_tid(cpu);
3682 }
3683 }
3684
3685 /*
3686 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
3687 * unfreezes the slab and puts it on the proper list.
3688 * Assumes the slab has already been safely taken away from kmem_cache_cpu
3689 * by the caller.
3690 */
3691 static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
3692 void *freelist)
3693 {
3694 struct kmem_cache_node *n = get_node(s, slab_nid(slab));
3695 int free_delta = 0;
3696 void *nextfree, *freelist_iter, *freelist_tail;
3697 int tail = DEACTIVATE_TO_HEAD;
3698 unsigned long flags = 0;
3699 struct slab new;
3700 struct slab old;
3701
3702 if (READ_ONCE(slab->freelist)) {
3703 stat(s, DEACTIVATE_REMOTE_FREES);
3704 tail = DEACTIVATE_TO_TAIL;
3705 }
3706
3707 /*
3708 * Stage one: Count the objects on cpu's freelist as free_delta and
3709 * remember the last object in freelist_tail for later splicing.
3710 */
3711 freelist_tail = NULL;
3712 freelist_iter = freelist;
3713 while (freelist_iter) {
3714 nextfree = get_freepointer(s, freelist_iter);
3715
3716 /*
3717 * If 'nextfree' is invalid, it is possible that the object at
3718 * 'freelist_iter' is already corrupted. So isolate all objects
3719 * starting at 'freelist_iter' by skipping them.
3720 */
3721 if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
3722 break;
3723
3724 freelist_tail = freelist_iter;
3725 free_delta++;
3726
3727 freelist_iter = nextfree;
3728 }
3729
3730 /*
3731 * Stage two: Unfreeze the slab while splicing the per-cpu
3732 * freelist to the head of slab's freelist.
3733 */
3734 do {
3735 old.freelist = READ_ONCE(slab->freelist);
3736 old.counters = READ_ONCE(slab->counters);
3737 VM_BUG_ON(!old.frozen);
3738
3739 /* Determine target state of the slab */
3740 new.counters = old.counters;
3741 new.frozen = 0;
3742 if (freelist_tail) {
3743 new.inuse -= free_delta;
3744 set_freepointer(s, freelist_tail, old.freelist);
3745 new.freelist = freelist;
3746 } else {
3747 new.freelist = old.freelist;
3748 }
3749 } while (!slab_update_freelist(s, slab,
3750 old.freelist, old.counters,
3751 new.freelist, new.counters,
3752 "unfreezing slab"));
3753
3754 /*
3755 * Stage three: Manipulate the slab list based on the updated state.
3756 */
3757 if (!new.inuse && n->nr_partial >= s->min_partial) {
3758 stat(s, DEACTIVATE_EMPTY);
3759 discard_slab(s, slab);
3760 stat(s, FREE_SLAB);
3761 } else if (new.freelist) {
3762 spin_lock_irqsave(&n->list_lock, flags);
3763 add_partial(n, slab, tail);
3764 spin_unlock_irqrestore(&n->list_lock, flags);
3765 stat(s, tail);
3766 } else {
3767 stat(s, DEACTIVATE_FULL);
3768 }
3769 }
3770
3771 /*
3772 * ___slab_alloc()'s caller is supposed to check if kmem_cache::kmem_cache_cpu::lock
3773 * can be acquired without a deadlock before invoking the function.
3774 *
3775 * Without LOCKDEP we trust the code to be correct. kmalloc_nolock() is
3776 * using local_lock_is_locked() properly before calling local_lock_cpu_slab(),
3777 * and kmalloc() is not used in an unsupported context.
3778 *
3779 * With LOCKDEP, on PREEMPT_RT lockdep does its checking in local_lock_irqsave().
3780 * On !PREEMPT_RT we use trylock to avoid false positives in NMI, but
3781 * lockdep_assert() will catch a bug in case:
3782 * #1
3783 * kmalloc() -> ___slab_alloc() -> irqsave -> NMI -> bpf -> kmalloc_nolock()
3784 * or
3785 * #2
3786 * kmalloc() -> ___slab_alloc() -> irqsave -> tracepoint/kprobe -> bpf -> kmalloc_nolock()
3787 *
3788 * On PREEMPT_RT an invocation is not possible from IRQ-off or preempt
3789 * disabled context. The lock will always be acquired and if needed it
3790 * will block and sleep until the lock is available.
3791 * #1 is possible in !PREEMPT_RT only.
3792 * #2 is possible in both with a twist that irqsave is replaced with rt_spinlock: 3793 * kmalloc() -> ___slab_alloc() -> rt_spin_lock(kmem_cache_A) -> 3794 * tracepoint/kprobe -> bpf -> kmalloc_nolock() -> rt_spin_lock(kmem_cache_B) 3795 * 3796 * local_lock_is_locked() prevents the case kmem_cache_A == kmem_cache_B 3797 */ 3798 #if defined(CONFIG_PREEMPT_RT) || !defined(CONFIG_LOCKDEP) 3799 #define local_lock_cpu_slab(s, flags) \ 3800 local_lock_irqsave(&(s)->cpu_slab->lock, flags) 3801 #else 3802 #define local_lock_cpu_slab(s, flags) \ 3803 do { \ 3804 bool __l = local_trylock_irqsave(&(s)->cpu_slab->lock, flags); \ 3805 lockdep_assert(__l); \ 3806 } while (0) 3807 #endif 3808 3809 #define local_unlock_cpu_slab(s, flags) \ 3810 local_unlock_irqrestore(&(s)->cpu_slab->lock, flags) 3811 3812 #ifdef CONFIG_SLUB_CPU_PARTIAL 3813 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) 3814 { 3815 struct kmem_cache_node *n = NULL, *n2 = NULL; 3816 struct slab *slab, *slab_to_discard = NULL; 3817 unsigned long flags = 0; 3818 3819 while (partial_slab) { 3820 slab = partial_slab; 3821 partial_slab = slab->next; 3822 3823 n2 = get_node(s, slab_nid(slab)); 3824 if (n != n2) { 3825 if (n) 3826 spin_unlock_irqrestore(&n->list_lock, flags); 3827 3828 n = n2; 3829 spin_lock_irqsave(&n->list_lock, flags); 3830 } 3831 3832 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { 3833 slab->next = slab_to_discard; 3834 slab_to_discard = slab; 3835 } else { 3836 add_partial(n, slab, DEACTIVATE_TO_TAIL); 3837 stat(s, FREE_ADD_PARTIAL); 3838 } 3839 } 3840 3841 if (n) 3842 spin_unlock_irqrestore(&n->list_lock, flags); 3843 3844 while (slab_to_discard) { 3845 slab = slab_to_discard; 3846 slab_to_discard = slab_to_discard->next; 3847 3848 stat(s, DEACTIVATE_EMPTY); 3849 discard_slab(s, slab); 3850 stat(s, FREE_SLAB); 3851 } 3852 } 3853 3854 /* 3855 * Put all the cpu partial slabs to the node partial list. 3856 */ 3857 static void put_partials(struct kmem_cache *s) 3858 { 3859 struct slab *partial_slab; 3860 unsigned long flags; 3861 3862 local_lock_irqsave(&s->cpu_slab->lock, flags); 3863 partial_slab = this_cpu_read(s->cpu_slab->partial); 3864 this_cpu_write(s->cpu_slab->partial, NULL); 3865 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3866 3867 if (partial_slab) 3868 __put_partials(s, partial_slab); 3869 } 3870 3871 static void put_partials_cpu(struct kmem_cache *s, 3872 struct kmem_cache_cpu *c) 3873 { 3874 struct slab *partial_slab; 3875 3876 partial_slab = slub_percpu_partial(c); 3877 c->partial = NULL; 3878 3879 if (partial_slab) 3880 __put_partials(s, partial_slab); 3881 } 3882 3883 /* 3884 * Put a slab into a partial slab slot if available. 3885 * 3886 * If we did not find a slot then simply move all the partials to the 3887 * per node partial list. 3888 */ 3889 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 3890 { 3891 struct slab *oldslab; 3892 struct slab *slab_to_put = NULL; 3893 unsigned long flags; 3894 int slabs = 0; 3895 3896 local_lock_cpu_slab(s, flags); 3897 3898 oldslab = this_cpu_read(s->cpu_slab->partial); 3899 3900 if (oldslab) { 3901 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 3902 /* 3903 * Partial array is full. Move the existing set to the 3904 * per node partial list. Postpone the actual unfreezing 3905 * outside of the critical section. 
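 * The detached set is handed to __put_partials() once the
 * local lock has been dropped.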
3906 */ 3907 slab_to_put = oldslab; 3908 oldslab = NULL; 3909 } else { 3910 slabs = oldslab->slabs; 3911 } 3912 } 3913 3914 slabs++; 3915 3916 slab->slabs = slabs; 3917 slab->next = oldslab; 3918 3919 this_cpu_write(s->cpu_slab->partial, slab); 3920 3921 local_unlock_cpu_slab(s, flags); 3922 3923 if (slab_to_put) { 3924 __put_partials(s, slab_to_put); 3925 stat(s, CPU_PARTIAL_DRAIN); 3926 } 3927 } 3928 3929 #else /* CONFIG_SLUB_CPU_PARTIAL */ 3930 3931 static inline void put_partials(struct kmem_cache *s) { } 3932 static inline void put_partials_cpu(struct kmem_cache *s, 3933 struct kmem_cache_cpu *c) { } 3934 3935 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 3936 3937 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 3938 { 3939 unsigned long flags; 3940 struct slab *slab; 3941 void *freelist; 3942 3943 local_lock_irqsave(&s->cpu_slab->lock, flags); 3944 3945 slab = c->slab; 3946 freelist = c->freelist; 3947 3948 c->slab = NULL; 3949 c->freelist = NULL; 3950 c->tid = next_tid(c->tid); 3951 3952 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3953 3954 if (slab) { 3955 deactivate_slab(s, slab, freelist); 3956 stat(s, CPUSLAB_FLUSH); 3957 } 3958 } 3959 3960 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 3961 { 3962 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3963 void *freelist = c->freelist; 3964 struct slab *slab = c->slab; 3965 3966 c->slab = NULL; 3967 c->freelist = NULL; 3968 c->tid = next_tid(c->tid); 3969 3970 if (slab) { 3971 deactivate_slab(s, slab, freelist); 3972 stat(s, CPUSLAB_FLUSH); 3973 } 3974 3975 put_partials_cpu(s, c); 3976 } 3977 3978 static inline void flush_this_cpu_slab(struct kmem_cache *s) 3979 { 3980 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); 3981 3982 if (c->slab) 3983 flush_slab(s, c); 3984 3985 put_partials(s); 3986 } 3987 3988 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 3989 { 3990 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3991 3992 return c->slab || slub_percpu_partial(c); 3993 } 3994 3995 #else /* CONFIG_SLUB_TINY */ 3996 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } 3997 static inline bool has_cpu_slab(int cpu, struct kmem_cache *s) { return false; } 3998 static inline void flush_this_cpu_slab(struct kmem_cache *s) { } 3999 #endif /* CONFIG_SLUB_TINY */ 4000 4001 static bool has_pcs_used(int cpu, struct kmem_cache *s) 4002 { 4003 struct slub_percpu_sheaves *pcs; 4004 4005 if (!s->cpu_sheaves) 4006 return false; 4007 4008 pcs = per_cpu_ptr(s->cpu_sheaves, cpu); 4009 4010 return (pcs->spare || pcs->rcu_free || pcs->main->size); 4011 } 4012 4013 /* 4014 * Flush cpu slab. 4015 * 4016 * Called from CPU work handler with migration disabled. 
4017 */ 4018 static void flush_cpu_slab(struct work_struct *w) 4019 { 4020 struct kmem_cache *s; 4021 struct slub_flush_work *sfw; 4022 4023 sfw = container_of(w, struct slub_flush_work, work); 4024 4025 s = sfw->s; 4026 4027 if (s->cpu_sheaves) 4028 pcs_flush_all(s); 4029 4030 flush_this_cpu_slab(s); 4031 } 4032 4033 static void flush_all_cpus_locked(struct kmem_cache *s) 4034 { 4035 struct slub_flush_work *sfw; 4036 unsigned int cpu; 4037 4038 lockdep_assert_cpus_held(); 4039 mutex_lock(&flush_lock); 4040 4041 for_each_online_cpu(cpu) { 4042 sfw = &per_cpu(slub_flush, cpu); 4043 if (!has_cpu_slab(cpu, s) && !has_pcs_used(cpu, s)) { 4044 sfw->skip = true; 4045 continue; 4046 } 4047 INIT_WORK(&sfw->work, flush_cpu_slab); 4048 sfw->skip = false; 4049 sfw->s = s; 4050 queue_work_on(cpu, flushwq, &sfw->work); 4051 } 4052 4053 for_each_online_cpu(cpu) { 4054 sfw = &per_cpu(slub_flush, cpu); 4055 if (sfw->skip) 4056 continue; 4057 flush_work(&sfw->work); 4058 } 4059 4060 mutex_unlock(&flush_lock); 4061 } 4062 4063 static void flush_all(struct kmem_cache *s) 4064 { 4065 cpus_read_lock(); 4066 flush_all_cpus_locked(s); 4067 cpus_read_unlock(); 4068 } 4069 4070 static void flush_rcu_sheaf(struct work_struct *w) 4071 { 4072 struct slub_percpu_sheaves *pcs; 4073 struct slab_sheaf *rcu_free; 4074 struct slub_flush_work *sfw; 4075 struct kmem_cache *s; 4076 4077 sfw = container_of(w, struct slub_flush_work, work); 4078 s = sfw->s; 4079 4080 local_lock(&s->cpu_sheaves->lock); 4081 pcs = this_cpu_ptr(s->cpu_sheaves); 4082 4083 rcu_free = pcs->rcu_free; 4084 pcs->rcu_free = NULL; 4085 4086 local_unlock(&s->cpu_sheaves->lock); 4087 4088 if (rcu_free) 4089 call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn); 4090 } 4091 4092 4093 /* needed for kvfree_rcu_barrier() */ 4094 void flush_all_rcu_sheaves(void) 4095 { 4096 struct slub_flush_work *sfw; 4097 struct kmem_cache *s; 4098 unsigned int cpu; 4099 4100 cpus_read_lock(); 4101 mutex_lock(&slab_mutex); 4102 4103 list_for_each_entry(s, &slab_caches, list) { 4104 if (!s->cpu_sheaves) 4105 continue; 4106 4107 mutex_lock(&flush_lock); 4108 4109 for_each_online_cpu(cpu) { 4110 sfw = &per_cpu(slub_flush, cpu); 4111 4112 /* 4113 * we don't check if rcu_free sheaf exists - racing 4114 * __kfree_rcu_sheaf() might have just removed it. 4115 * by executing flush_rcu_sheaf() on the cpu we make 4116 * sure the __kfree_rcu_sheaf() finished its call_rcu() 4117 */ 4118 4119 INIT_WORK(&sfw->work, flush_rcu_sheaf); 4120 sfw->s = s; 4121 queue_work_on(cpu, flushwq, &sfw->work); 4122 } 4123 4124 for_each_online_cpu(cpu) { 4125 sfw = &per_cpu(slub_flush, cpu); 4126 flush_work(&sfw->work); 4127 } 4128 4129 mutex_unlock(&flush_lock); 4130 } 4131 4132 mutex_unlock(&slab_mutex); 4133 cpus_read_unlock(); 4134 4135 rcu_barrier(); 4136 } 4137 4138 /* 4139 * Use the cpu notifier to insure that the cpu slabs are flushed when 4140 * necessary. 4141 */ 4142 static int slub_cpu_dead(unsigned int cpu) 4143 { 4144 struct kmem_cache *s; 4145 4146 mutex_lock(&slab_mutex); 4147 list_for_each_entry(s, &slab_caches, list) { 4148 __flush_cpu_slab(s, cpu); 4149 if (s->cpu_sheaves) 4150 __pcs_flush_all_cpu(s, cpu); 4151 } 4152 mutex_unlock(&slab_mutex); 4153 return 0; 4154 } 4155 4156 /* 4157 * Check if the objects in a per cpu structure fit numa 4158 * locality expectations. 
4159 */ 4160 static inline int node_match(struct slab *slab, int node) 4161 { 4162 #ifdef CONFIG_NUMA 4163 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 4164 return 0; 4165 #endif 4166 return 1; 4167 } 4168 4169 #ifdef CONFIG_SLUB_DEBUG 4170 static int count_free(struct slab *slab) 4171 { 4172 return slab->objects - slab->inuse; 4173 } 4174 4175 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 4176 { 4177 return atomic_long_read(&n->total_objects); 4178 } 4179 4180 /* Supports checking bulk free of a constructed freelist */ 4181 static inline bool free_debug_processing(struct kmem_cache *s, 4182 struct slab *slab, void *head, void *tail, int *bulk_cnt, 4183 unsigned long addr, depot_stack_handle_t handle) 4184 { 4185 bool checks_ok = false; 4186 void *object = head; 4187 int cnt = 0; 4188 4189 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 4190 if (!check_slab(s, slab)) 4191 goto out; 4192 } 4193 4194 if (slab->inuse < *bulk_cnt) { 4195 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", 4196 slab->inuse, *bulk_cnt); 4197 goto out; 4198 } 4199 4200 next_object: 4201 4202 if (++cnt > *bulk_cnt) 4203 goto out_cnt; 4204 4205 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 4206 if (!free_consistency_checks(s, slab, object, addr)) 4207 goto out; 4208 } 4209 4210 if (s->flags & SLAB_STORE_USER) 4211 set_track_update(s, object, TRACK_FREE, addr, handle); 4212 trace(s, slab, object, 0); 4213 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 4214 init_object(s, object, SLUB_RED_INACTIVE); 4215 4216 /* Reached end of constructed freelist yet? */ 4217 if (object != tail) { 4218 object = get_freepointer(s, object); 4219 goto next_object; 4220 } 4221 checks_ok = true; 4222 4223 out_cnt: 4224 if (cnt != *bulk_cnt) { 4225 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", 4226 *bulk_cnt, cnt); 4227 *bulk_cnt = cnt; 4228 } 4229 4230 out: 4231 4232 if (!checks_ok) 4233 slab_fix(s, "Object at 0x%p not freed", object); 4234 4235 return checks_ok; 4236 } 4237 #endif /* CONFIG_SLUB_DEBUG */ 4238 4239 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) 4240 static unsigned long count_partial(struct kmem_cache_node *n, 4241 int (*get_count)(struct slab *)) 4242 { 4243 unsigned long flags; 4244 unsigned long x = 0; 4245 struct slab *slab; 4246 4247 spin_lock_irqsave(&n->list_lock, flags); 4248 list_for_each_entry(slab, &n->partial, slab_list) 4249 x += get_count(slab); 4250 spin_unlock_irqrestore(&n->list_lock, flags); 4251 return x; 4252 } 4253 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ 4254 4255 #ifdef CONFIG_SLUB_DEBUG 4256 #define MAX_PARTIAL_TO_SCAN 10000 4257 4258 static unsigned long count_partial_free_approx(struct kmem_cache_node *n) 4259 { 4260 unsigned long flags; 4261 unsigned long x = 0; 4262 struct slab *slab; 4263 4264 spin_lock_irqsave(&n->list_lock, flags); 4265 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) { 4266 list_for_each_entry(slab, &n->partial, slab_list) 4267 x += slab->objects - slab->inuse; 4268 } else { 4269 /* 4270 * For a long list, approximate the total count of objects in 4271 * it to meet the limit on the number of slabs to scan. 4272 * Scan from both the list's head and tail for better accuracy. 
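 *
 * Worked example (numbers purely illustrative): if the 5000 slabs
 * scanned from the head plus the 5000 scanned from the tail hold
 * 20000 free objects and the list has 60000 partial slabs in total,
 * the estimate is mult_frac(20000, 60000, 10000) == 120000, clamped
 * below to the node's total object count.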
4273 */ 4274 unsigned long scanned = 0; 4275 4276 list_for_each_entry(slab, &n->partial, slab_list) { 4277 x += slab->objects - slab->inuse; 4278 if (++scanned == MAX_PARTIAL_TO_SCAN / 2) 4279 break; 4280 } 4281 list_for_each_entry_reverse(slab, &n->partial, slab_list) { 4282 x += slab->objects - slab->inuse; 4283 if (++scanned == MAX_PARTIAL_TO_SCAN) 4284 break; 4285 } 4286 x = mult_frac(x, n->nr_partial, scanned); 4287 x = min(x, node_nr_objs(n)); 4288 } 4289 spin_unlock_irqrestore(&n->list_lock, flags); 4290 return x; 4291 } 4292 4293 static noinline void 4294 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 4295 { 4296 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 4297 DEFAULT_RATELIMIT_BURST); 4298 int cpu = raw_smp_processor_id(); 4299 int node; 4300 struct kmem_cache_node *n; 4301 4302 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 4303 return; 4304 4305 pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n", 4306 cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags); 4307 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 4308 s->name, s->object_size, s->size, oo_order(s->oo), 4309 oo_order(s->min)); 4310 4311 if (oo_order(s->min) > get_order(s->object_size)) 4312 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n", 4313 s->name); 4314 4315 for_each_kmem_cache_node(s, node, n) { 4316 unsigned long nr_slabs; 4317 unsigned long nr_objs; 4318 unsigned long nr_free; 4319 4320 nr_free = count_partial_free_approx(n); 4321 nr_slabs = node_nr_slabs(n); 4322 nr_objs = node_nr_objs(n); 4323 4324 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 4325 node, nr_slabs, nr_objs, nr_free); 4326 } 4327 } 4328 #else /* CONFIG_SLUB_DEBUG */ 4329 static inline void 4330 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } 4331 #endif 4332 4333 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) 4334 { 4335 if (unlikely(slab_test_pfmemalloc(slab))) 4336 return gfp_pfmemalloc_allowed(gfpflags); 4337 4338 return true; 4339 } 4340 4341 #ifndef CONFIG_SLUB_TINY 4342 static inline bool 4343 __update_cpu_freelist_fast(struct kmem_cache *s, 4344 void *freelist_old, void *freelist_new, 4345 unsigned long tid) 4346 { 4347 freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; 4348 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; 4349 4350 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, 4351 &old.full, new.full); 4352 } 4353 4354 /* 4355 * Check the slab->freelist and either transfer the freelist to the 4356 * per cpu freelist or deactivate the slab. 4357 * 4358 * The slab is still frozen if the return value is not NULL. 4359 * 4360 * If this function returns NULL then the slab has been unfrozen. 4361 */ 4362 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 4363 { 4364 struct slab new; 4365 unsigned long counters; 4366 void *freelist; 4367 4368 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 4369 4370 do { 4371 freelist = slab->freelist; 4372 counters = slab->counters; 4373 4374 new.counters = counters; 4375 4376 new.inuse = slab->objects; 4377 new.frozen = freelist != NULL; 4378 4379 } while (!__slab_update_freelist(s, slab, 4380 freelist, counters, 4381 NULL, new.counters, 4382 "get_freelist")); 4383 4384 return freelist; 4385 } 4386 4387 /* 4388 * Freeze the partial slab and return the pointer to the freelist. 
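 *
 * In terms of the counters this is a single cmpxchg transition
 * (a sketch, not literal code):
 *
 *	{ freelist, inuse, frozen = 0 } -> { NULL, slab->objects, frozen = 1 }
 *
 * i.e. all objects currently on slab->freelist are handed to the caller
 * and the slab becomes a cpu slab of the freezing processor.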
4389 */ 4390 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) 4391 { 4392 struct slab new; 4393 unsigned long counters; 4394 void *freelist; 4395 4396 do { 4397 freelist = slab->freelist; 4398 counters = slab->counters; 4399 4400 new.counters = counters; 4401 VM_BUG_ON(new.frozen); 4402 4403 new.inuse = slab->objects; 4404 new.frozen = 1; 4405 4406 } while (!slab_update_freelist(s, slab, 4407 freelist, counters, 4408 NULL, new.counters, 4409 "freeze_slab")); 4410 4411 return freelist; 4412 } 4413 4414 /* 4415 * Slow path. The lockless freelist is empty or we need to perform 4416 * debugging duties. 4417 * 4418 * Processing is still very fast if new objects have been freed to the 4419 * regular freelist. In that case we simply take over the regular freelist 4420 * as the lockless freelist and zap the regular freelist. 4421 * 4422 * If that is not working then we fall back to the partial lists. We take the 4423 * first element of the freelist as the object to allocate now and move the 4424 * rest of the freelist to the lockless freelist. 4425 * 4426 * And if we were unable to get a new slab from the partial slab lists then 4427 * we need to allocate a new slab. This is the slowest path since it involves 4428 * a call to the page allocator and the setup of a new slab. 4429 * 4430 * Version of __slab_alloc to use when we know that preemption is 4431 * already disabled (which is the case for bulk allocation). 4432 */ 4433 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 4434 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 4435 { 4436 bool allow_spin = gfpflags_allow_spinning(gfpflags); 4437 void *freelist; 4438 struct slab *slab; 4439 unsigned long flags; 4440 struct partial_context pc; 4441 bool try_thisnode = true; 4442 4443 stat(s, ALLOC_SLOWPATH); 4444 4445 reread_slab: 4446 4447 slab = READ_ONCE(c->slab); 4448 if (!slab) { 4449 /* 4450 * if the node is not online or has no normal memory, just 4451 * ignore the node constraint 4452 */ 4453 if (unlikely(node != NUMA_NO_NODE && 4454 !node_isset(node, slab_nodes))) 4455 node = NUMA_NO_NODE; 4456 goto new_slab; 4457 } 4458 4459 if (unlikely(!node_match(slab, node))) { 4460 /* 4461 * same as above but node_match() being false already 4462 * implies node != NUMA_NO_NODE. 4463 * 4464 * We don't strictly honor pfmemalloc and NUMA preferences 4465 * when !allow_spin because: 4466 * 4467 * 1. Most kmalloc() users allocate objects on the local node, 4468 * so kmalloc_nolock() tries not to interfere with them by 4469 * deactivating the cpu slab. 4470 * 4471 * 2. Deactivating due to NUMA or pfmemalloc mismatch may cause 4472 * unnecessary slab allocations even when n->partial list 4473 * is not empty. 
*/ 4475 if (!node_isset(node, slab_nodes) || 4476 !allow_spin) { 4477 node = NUMA_NO_NODE; 4478 } else { 4479 stat(s, ALLOC_NODE_MISMATCH); 4480 goto deactivate_slab; 4481 } 4482 } 4483 4484 /* 4485 * By rights, we should be searching for a slab page that was 4486 * PFMEMALLOC but right now, we are losing the pfmemalloc 4487 * information when the page leaves the per-cpu allocator 4488 */ 4489 if (unlikely(!pfmemalloc_match(slab, gfpflags) && allow_spin)) 4490 goto deactivate_slab; 4491 4492 /* must check again c->slab in case we got preempted and it changed */ 4493 local_lock_cpu_slab(s, flags); 4494 4495 if (unlikely(slab != c->slab)) { 4496 local_unlock_cpu_slab(s, flags); 4497 goto reread_slab; 4498 } 4499 freelist = c->freelist; 4500 if (freelist) 4501 goto load_freelist; 4502 4503 freelist = get_freelist(s, slab); 4504 4505 if (!freelist) { 4506 c->slab = NULL; 4507 c->tid = next_tid(c->tid); 4508 local_unlock_cpu_slab(s, flags); 4509 stat(s, DEACTIVATE_BYPASS); 4510 goto new_slab; 4511 } 4512 4513 stat(s, ALLOC_REFILL); 4514 4515 load_freelist: 4516 4517 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 4518 4519 /* 4520 * freelist is pointing to the list of objects to be used. 4521 * slab is pointing to the slab from which the objects are obtained. 4522 * That slab must be frozen for per cpu allocations to work. 4523 */ 4524 VM_BUG_ON(!c->slab->frozen); 4525 c->freelist = get_freepointer(s, freelist); 4526 c->tid = next_tid(c->tid); 4527 local_unlock_cpu_slab(s, flags); 4528 return freelist; 4529 4530 deactivate_slab: 4531 4532 local_lock_cpu_slab(s, flags); 4533 if (slab != c->slab) { 4534 local_unlock_cpu_slab(s, flags); 4535 goto reread_slab; 4536 } 4537 freelist = c->freelist; 4538 c->slab = NULL; 4539 c->freelist = NULL; 4540 c->tid = next_tid(c->tid); 4541 local_unlock_cpu_slab(s, flags); 4542 deactivate_slab(s, slab, freelist); 4543 4544 new_slab: 4545 4546 #ifdef CONFIG_SLUB_CPU_PARTIAL 4547 while (slub_percpu_partial(c)) { 4548 local_lock_cpu_slab(s, flags); 4549 if (unlikely(c->slab)) { 4550 local_unlock_cpu_slab(s, flags); 4551 goto reread_slab; 4552 } 4553 if (unlikely(!slub_percpu_partial(c))) { 4554 local_unlock_cpu_slab(s, flags); 4555 /* we were preempted and partial list got empty */ 4556 goto new_objects; 4557 } 4558 4559 slab = slub_percpu_partial(c); 4560 slub_set_percpu_partial(c, slab); 4561 4562 if (likely(node_match(slab, node) && 4563 pfmemalloc_match(slab, gfpflags)) || 4564 !allow_spin) { 4565 c->slab = slab; 4566 freelist = get_freelist(s, slab); 4567 VM_BUG_ON(!freelist); 4568 stat(s, CPU_PARTIAL_ALLOC); 4569 goto load_freelist; 4570 } 4571 4572 local_unlock_cpu_slab(s, flags); 4573 4574 slab->next = NULL; 4575 __put_partials(s, slab); 4576 } 4577 #endif 4578 4579 new_objects: 4580 4581 pc.flags = gfpflags; 4582 /* 4583 * When a preferred node is indicated but __GFP_THISNODE is not set: 4584 * 4585 * 1) try to get a partial slab from target node only by having 4586 * __GFP_THISNODE in pc.flags for get_partial() 4587 * 2) if 1) failed, try to allocate a new slab from target node with 4588 * GFP_NOWAIT | __GFP_THISNODE opportunistically 4589 * 3) if 2) failed, retry with original gfpflags which will allow 4590 * get_partial() to try partial lists of other nodes before potentially 4591 * allocating a new page from other nodes 4592 */ 4593 if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 4594 && try_thisnode)) { 4595 if (unlikely(!allow_spin)) 4596 /* Do not upgrade gfp to NOWAIT from more restrictive mode */ 4597 pc.flags = gfpflags |
__GFP_THISNODE; 4598 else 4599 pc.flags = GFP_NOWAIT | __GFP_THISNODE; 4600 } 4601 4602 pc.orig_size = orig_size; 4603 slab = get_partial(s, node, &pc); 4604 if (slab) { 4605 if (kmem_cache_debug(s)) { 4606 freelist = pc.object; 4607 /* 4608 * For debug caches here we had to go through 4609 * alloc_single_from_partial() so just store the 4610 * tracking info and return the object. 4611 * 4612 * Due to disabled preemption we need to disallow 4613 * blocking. The flags are further adjusted by 4614 * gfp_nested_mask() in stack_depot itself. 4615 */ 4616 if (s->flags & SLAB_STORE_USER) 4617 set_track(s, freelist, TRACK_ALLOC, addr, 4618 gfpflags & ~(__GFP_DIRECT_RECLAIM)); 4619 4620 return freelist; 4621 } 4622 4623 freelist = freeze_slab(s, slab); 4624 goto retry_load_slab; 4625 } 4626 4627 slub_put_cpu_ptr(s->cpu_slab); 4628 slab = new_slab(s, pc.flags, node); 4629 c = slub_get_cpu_ptr(s->cpu_slab); 4630 4631 if (unlikely(!slab)) { 4632 if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 4633 && try_thisnode) { 4634 try_thisnode = false; 4635 goto new_objects; 4636 } 4637 slab_out_of_memory(s, gfpflags, node); 4638 return NULL; 4639 } 4640 4641 stat(s, ALLOC_SLAB); 4642 4643 if (kmem_cache_debug(s)) { 4644 freelist = alloc_single_from_new_slab(s, slab, orig_size, gfpflags); 4645 4646 if (unlikely(!freelist)) 4647 goto new_objects; 4648 4649 if (s->flags & SLAB_STORE_USER) 4650 set_track(s, freelist, TRACK_ALLOC, addr, 4651 gfpflags & ~(__GFP_DIRECT_RECLAIM)); 4652 4653 return freelist; 4654 } 4655 4656 /* 4657 * No other reference to the slab yet so we can 4658 * muck around with it freely without cmpxchg 4659 */ 4660 freelist = slab->freelist; 4661 slab->freelist = NULL; 4662 slab->inuse = slab->objects; 4663 slab->frozen = 1; 4664 4665 inc_slabs_node(s, slab_nid(slab), slab->objects); 4666 4667 if (unlikely(!pfmemalloc_match(slab, gfpflags) && allow_spin)) { 4668 /* 4669 * For !pfmemalloc_match() case we don't load freelist so that 4670 * we don't make further mismatched allocations easier. 4671 */ 4672 deactivate_slab(s, slab, get_freepointer(s, freelist)); 4673 return freelist; 4674 } 4675 4676 retry_load_slab: 4677 4678 local_lock_cpu_slab(s, flags); 4679 if (unlikely(c->slab)) { 4680 void *flush_freelist = c->freelist; 4681 struct slab *flush_slab = c->slab; 4682 4683 c->slab = NULL; 4684 c->freelist = NULL; 4685 c->tid = next_tid(c->tid); 4686 4687 local_unlock_cpu_slab(s, flags); 4688 4689 if (unlikely(!allow_spin)) { 4690 /* Reentrant slub cannot take locks, defer */ 4691 defer_deactivate_slab(flush_slab, flush_freelist); 4692 } else { 4693 deactivate_slab(s, flush_slab, flush_freelist); 4694 } 4695 4696 stat(s, CPUSLAB_FLUSH); 4697 4698 goto retry_load_slab; 4699 } 4700 c->slab = slab; 4701 4702 goto load_freelist; 4703 } 4704 /* 4705 * We disallow kprobes in ___slab_alloc() to prevent reentrance 4706 * 4707 * kmalloc() -> ___slab_alloc() -> local_lock_cpu_slab() protected part of 4708 * ___slab_alloc() manipulating c->freelist -> kprobe -> bpf -> 4709 * kmalloc_nolock() or kfree_nolock() -> __update_cpu_freelist_fast() 4710 * manipulating c->freelist without lock. 4711 * 4712 * This does not prevent kprobe in functions called from ___slab_alloc() such as 4713 * local_lock_irqsave() itself, and that is fine, we only need to protect the 4714 * c->freelist manipulation in ___slab_alloc() itself. 4715 */ 4716 NOKPROBE_SYMBOL(___slab_alloc); 4717 4718 /* 4719 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 4720 * disabled. 
Compensates for possible cpu changes by refetching the per cpu area 4721 * pointer. 4722 */ 4723 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 4724 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 4725 { 4726 void *p; 4727 4728 #ifdef CONFIG_PREEMPT_COUNT 4729 /* 4730 * We may have been preempted and rescheduled on a different 4731 * cpu before disabling preemption. Need to reload cpu area 4732 * pointer. 4733 */ 4734 c = slub_get_cpu_ptr(s->cpu_slab); 4735 #endif 4736 if (unlikely(!gfpflags_allow_spinning(gfpflags))) { 4737 if (local_lock_is_locked(&s->cpu_slab->lock)) { 4738 /* 4739 * EBUSY is an internal signal to kmalloc_nolock() to 4740 * retry a different bucket. It's not propagated 4741 * to the caller. 4742 */ 4743 p = ERR_PTR(-EBUSY); 4744 goto out; 4745 } 4746 } 4747 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); 4748 out: 4749 #ifdef CONFIG_PREEMPT_COUNT 4750 slub_put_cpu_ptr(s->cpu_slab); 4751 #endif 4752 return p; 4753 } 4754 4755 static __always_inline void *__slab_alloc_node(struct kmem_cache *s, 4756 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 4757 { 4758 struct kmem_cache_cpu *c; 4759 struct slab *slab; 4760 unsigned long tid; 4761 void *object; 4762 4763 redo: 4764 /* 4765 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 4766 * enabled. We may switch back and forth between cpus while 4767 * reading from one cpu area. That does not matter as long 4768 * as we end up on the original cpu again when doing the cmpxchg. 4769 * 4770 * We must guarantee that tid and kmem_cache_cpu are retrieved on the 4771 * same cpu. We first read the kmem_cache_cpu pointer and use it to read 4772 * the tid. If we are preempted and switched to another cpu between the 4773 * two reads, it's OK as the two are still associated with the same cpu 4774 * and cmpxchg later will validate the cpu. 4775 */ 4776 c = raw_cpu_ptr(s->cpu_slab); 4777 tid = READ_ONCE(c->tid); 4778 4779 /* 4780 * Irqless object alloc/free algorithm used here depends on the sequence 4781 * of fetching cpu_slab's data. tid should be fetched before anything 4782 * on c to guarantee that object and slab associated with previous tid 4783 * won't be used with current tid. If we fetch tid first, object and 4784 * slab could be the ones associated with next tid and our alloc/free 4785 * request will fail. In this case, we will retry. So, no problem. 4786 */ 4787 barrier(); 4788 4789 /* 4790 * The transaction ids are globally unique per cpu and per operation on 4791 * a per cpu queue. Thus they guarantee that the cmpxchg_double 4792 * occurs on the right processor and that there was no operation on the 4793 * linked list in between. 4794 */ 4795 4796 object = c->freelist; 4797 slab = c->slab; 4798 4799 #ifdef CONFIG_NUMA 4800 if (static_branch_unlikely(&strict_numa) && 4801 node == NUMA_NO_NODE) { 4802 4803 struct mempolicy *mpol = current->mempolicy; 4804 4805 if (mpol) { 4806 /* 4807 * Special BIND rule support. If the existing slab 4808 * is in the permitted set then do not redirect 4809 * to a particular node. 4810 * Otherwise we apply the memory policy to get 4811 * the node we need to allocate on.
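 *
 * Illustrative example (not taken from a real workload): with
 * MPOL_BIND restricted to nodes 1-2 and the current cpu slab
 * sitting on node 0, the allocation is redirected via
 * mempolicy_slab_node(); if the cpu slab already sits on node 1
 * or 2 it is used as is.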
4812 */ 4813 if (mpol->mode != MPOL_BIND || !slab || 4814 !node_isset(slab_nid(slab), mpol->nodes)) 4815 4816 node = mempolicy_slab_node(); 4817 } 4818 } 4819 #endif 4820 4821 if (!USE_LOCKLESS_FAST_PATH() || 4822 unlikely(!object || !slab || !node_match(slab, node))) { 4823 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); 4824 } else { 4825 void *next_object = get_freepointer_safe(s, object); 4826 4827 /* 4828 * The cmpxchg will only match if there was no additional 4829 * operation and if we are on the right processor. 4830 * 4831 * The cmpxchg does the following atomically (without lock 4832 * semantics!) 4833 * 1. Relocate first pointer to the current per cpu area. 4834 * 2. Verify that tid and freelist have not been changed 4835 * 3. If they were not changed replace tid and freelist 4836 * 4837 * Since this is without lock semantics the protection is only 4838 * against code executing on this cpu *not* from access by 4839 * other cpus. 4840 */ 4841 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { 4842 note_cmpxchg_failure("slab_alloc", s, tid); 4843 goto redo; 4844 } 4845 prefetch_freepointer(s, next_object); 4846 stat(s, ALLOC_FASTPATH); 4847 } 4848 4849 return object; 4850 } 4851 #else /* CONFIG_SLUB_TINY */ 4852 static void *__slab_alloc_node(struct kmem_cache *s, 4853 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 4854 { 4855 struct partial_context pc; 4856 struct slab *slab; 4857 void *object; 4858 4859 pc.flags = gfpflags; 4860 pc.orig_size = orig_size; 4861 slab = get_partial(s, node, &pc); 4862 4863 if (slab) 4864 return pc.object; 4865 4866 slab = new_slab(s, gfpflags, node); 4867 if (unlikely(!slab)) { 4868 slab_out_of_memory(s, gfpflags, node); 4869 return NULL; 4870 } 4871 4872 object = alloc_single_from_new_slab(s, slab, orig_size, gfpflags); 4873 4874 return object; 4875 } 4876 #endif /* CONFIG_SLUB_TINY */ 4877 4878 /* 4879 * If the object has been wiped upon free, make sure it's fully initialized by 4880 * zeroing out freelist pointer. 4881 * 4882 * Note that we also wipe custom freelist pointers. 4883 */ 4884 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 4885 void *obj) 4886 { 4887 if (unlikely(slab_want_init_on_free(s)) && obj && 4888 !freeptr_outside_object(s)) 4889 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 4890 0, sizeof(void *)); 4891 } 4892 4893 static __fastpath_inline 4894 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 4895 { 4896 flags &= gfp_allowed_mask; 4897 4898 might_alloc(flags); 4899 4900 if (unlikely(should_failslab(s, flags))) 4901 return NULL; 4902 4903 return s; 4904 } 4905 4906 static __fastpath_inline 4907 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 4908 gfp_t flags, size_t size, void **p, bool init, 4909 unsigned int orig_size) 4910 { 4911 unsigned int zero_size = s->object_size; 4912 bool kasan_init = init; 4913 size_t i; 4914 gfp_t init_flags = flags & gfp_allowed_mask; 4915 4916 /* 4917 * For kmalloc object, the allocated memory size(object_size) is likely 4918 * larger than the requested size(orig_size). If redzone check is 4919 * enabled for the extra space, don't zero it, as it will be redzoned 4920 * soon. The redzone operation for this extra space could be seen as a 4921 * replacement of current poisoning under certain debug option, and 4922 * won't break other sanity checks. 
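 *
 * For example (assuming SLAB_RED_ZONE debugging is enabled for the
 * cache), a kmalloc(13) served from kmalloc-16 only zeroes the 13
 * requested bytes here; the remaining 3 bytes are covered by the
 * kmalloc redzone instead of being zeroed.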
4923 */ 4924 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) && 4925 (s->flags & SLAB_KMALLOC)) 4926 zero_size = orig_size; 4927 4928 /* 4929 * When slab_debug is enabled, avoid memory initialization integrated 4930 * into KASAN and instead zero out the memory via the memset below with 4931 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and 4932 * cause false-positive reports. This does not lead to a performance 4933 * penalty on production builds, as slab_debug is not intended to be 4934 * enabled there. 4935 */ 4936 if (__slub_debug_enabled()) 4937 kasan_init = false; 4938 4939 /* 4940 * As memory initialization might be integrated into KASAN, 4941 * kasan_slab_alloc and initialization memset must be 4942 * kept together to avoid discrepancies in behavior. 4943 * 4944 * As p[i] might get tagged, memset and kmemleak hook come after KASAN. 4945 */ 4946 for (i = 0; i < size; i++) { 4947 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init); 4948 if (p[i] && init && (!kasan_init || 4949 !kasan_has_integrated_init())) 4950 memset(p[i], 0, zero_size); 4951 if (gfpflags_allow_spinning(flags)) 4952 kmemleak_alloc_recursive(p[i], s->object_size, 1, 4953 s->flags, init_flags); 4954 kmsan_slab_alloc(s, p[i], init_flags); 4955 alloc_tagging_slab_alloc_hook(s, p[i], flags); 4956 } 4957 4958 return memcg_slab_post_alloc_hook(s, lru, flags, size, p); 4959 } 4960 4961 /* 4962 * Replace the empty main sheaf with a (at least partially) full sheaf. 4963 * 4964 * Must be called with the cpu_sheaves local lock locked. If successful, returns 4965 * the pcs pointer and the local lock locked (possibly on a different cpu than 4966 * initially called). If not successful, returns NULL and the local lock 4967 * unlocked. 4968 */ 4969 static struct slub_percpu_sheaves * 4970 __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, gfp_t gfp) 4971 { 4972 struct slab_sheaf *empty = NULL; 4973 struct slab_sheaf *full; 4974 struct node_barn *barn; 4975 bool can_alloc; 4976 4977 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock)); 4978 4979 if (pcs->spare && pcs->spare->size > 0) { 4980 swap(pcs->main, pcs->spare); 4981 return pcs; 4982 } 4983 4984 barn = get_barn(s); 4985 4986 full = barn_replace_empty_sheaf(barn, pcs->main); 4987 4988 if (full) { 4989 stat(s, BARN_GET); 4990 pcs->main = full; 4991 return pcs; 4992 } 4993 4994 stat(s, BARN_GET_FAIL); 4995 4996 can_alloc = gfpflags_allow_blocking(gfp); 4997 4998 if (can_alloc) { 4999 if (pcs->spare) { 5000 empty = pcs->spare; 5001 pcs->spare = NULL; 5002 } else { 5003 empty = barn_get_empty_sheaf(barn); 5004 } 5005 } 5006 5007 local_unlock(&s->cpu_sheaves->lock); 5008 5009 if (!can_alloc) 5010 return NULL; 5011 5012 if (empty) { 5013 if (!refill_sheaf(s, empty, gfp)) { 5014 full = empty; 5015 } else { 5016 /* 5017 * we must be very low on memory so don't bother 5018 * with the barn 5019 */ 5020 free_empty_sheaf(s, empty); 5021 } 5022 } else { 5023 full = alloc_full_sheaf(s, gfp); 5024 } 5025 5026 if (!full) 5027 return NULL; 5028 5029 /* 5030 * we can reach here only when gfpflags_allow_blocking 5031 * so this must not be an irq 5032 */ 5033 local_lock(&s->cpu_sheaves->lock); 5034 pcs = this_cpu_ptr(s->cpu_sheaves); 5035 5036 /* 5037 * If we are returning empty sheaf, we either got it from the 5038 * barn or had to allocate one. If we are returning a full 5039 * sheaf, it's due to racing or being migrated to a different 5040 * cpu. 
Breaching the barn's sheaf limits should be thus rare 5041 * enough so just ignore them to simplify the recovery. 5042 */ 5043 5044 if (pcs->main->size == 0) { 5045 barn_put_empty_sheaf(barn, pcs->main); 5046 pcs->main = full; 5047 return pcs; 5048 } 5049 5050 if (!pcs->spare) { 5051 pcs->spare = full; 5052 return pcs; 5053 } 5054 5055 if (pcs->spare->size == 0) { 5056 barn_put_empty_sheaf(barn, pcs->spare); 5057 pcs->spare = full; 5058 return pcs; 5059 } 5060 5061 barn_put_full_sheaf(barn, full); 5062 stat(s, BARN_PUT); 5063 5064 return pcs; 5065 } 5066 5067 static __fastpath_inline 5068 void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node) 5069 { 5070 struct slub_percpu_sheaves *pcs; 5071 bool node_requested; 5072 void *object; 5073 5074 #ifdef CONFIG_NUMA 5075 if (static_branch_unlikely(&strict_numa) && 5076 node == NUMA_NO_NODE) { 5077 5078 struct mempolicy *mpol = current->mempolicy; 5079 5080 if (mpol) { 5081 /* 5082 * Special BIND rule support. If the local node 5083 * is in permitted set then do not redirect 5084 * to a particular node. 5085 * Otherwise we apply the memory policy to get 5086 * the node we need to allocate on. 5087 */ 5088 if (mpol->mode != MPOL_BIND || 5089 !node_isset(numa_mem_id(), mpol->nodes)) 5090 5091 node = mempolicy_slab_node(); 5092 } 5093 } 5094 #endif 5095 5096 node_requested = IS_ENABLED(CONFIG_NUMA) && node != NUMA_NO_NODE; 5097 5098 /* 5099 * We assume the percpu sheaves contain only local objects although it's 5100 * not completely guaranteed, so we verify later. 5101 */ 5102 if (unlikely(node_requested && node != numa_mem_id())) 5103 return NULL; 5104 5105 if (!local_trylock(&s->cpu_sheaves->lock)) 5106 return NULL; 5107 5108 pcs = this_cpu_ptr(s->cpu_sheaves); 5109 5110 if (unlikely(pcs->main->size == 0)) { 5111 pcs = __pcs_replace_empty_main(s, pcs, gfp); 5112 if (unlikely(!pcs)) 5113 return NULL; 5114 } 5115 5116 object = pcs->main->objects[pcs->main->size - 1]; 5117 5118 if (unlikely(node_requested)) { 5119 /* 5120 * Verify that the object was from the node we want. This could 5121 * be false because of cpu migration during an unlocked part of 5122 * the current allocation or previous freeing process. 5123 */ 5124 if (folio_nid(virt_to_folio(object)) != node) { 5125 local_unlock(&s->cpu_sheaves->lock); 5126 return NULL; 5127 } 5128 } 5129 5130 pcs->main->size--; 5131 5132 local_unlock(&s->cpu_sheaves->lock); 5133 5134 stat(s, ALLOC_PCS); 5135 5136 return object; 5137 } 5138 5139 static __fastpath_inline 5140 unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, size_t size, void **p) 5141 { 5142 struct slub_percpu_sheaves *pcs; 5143 struct slab_sheaf *main; 5144 unsigned int allocated = 0; 5145 unsigned int batch; 5146 5147 next_batch: 5148 if (!local_trylock(&s->cpu_sheaves->lock)) 5149 return allocated; 5150 5151 pcs = this_cpu_ptr(s->cpu_sheaves); 5152 5153 if (unlikely(pcs->main->size == 0)) { 5154 5155 struct slab_sheaf *full; 5156 5157 if (pcs->spare && pcs->spare->size > 0) { 5158 swap(pcs->main, pcs->spare); 5159 goto do_alloc; 5160 } 5161 5162 full = barn_replace_empty_sheaf(get_barn(s), pcs->main); 5163 5164 if (full) { 5165 stat(s, BARN_GET); 5166 pcs->main = full; 5167 goto do_alloc; 5168 } 5169 5170 stat(s, BARN_GET_FAIL); 5171 5172 local_unlock(&s->cpu_sheaves->lock); 5173 5174 /* 5175 * Once full sheaves in barn are depleted, let the bulk 5176 * allocation continue from slab pages, otherwise we would just 5177 * be copying arrays of pointers twice. 
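 *
 * E.g. (illustrative sizes): a bulk request for 100 objects that finds
 * a 32-object main sheaf and no full sheaf in the barn returns 32 from
 * here and lets the caller obtain the remaining 68 objects from slab
 * pages.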
5178 */ 5179 return allocated; 5180 } 5181 5182 do_alloc: 5183 5184 main = pcs->main; 5185 batch = min(size, main->size); 5186 5187 main->size -= batch; 5188 memcpy(p, main->objects + main->size, batch * sizeof(void *)); 5189 5190 local_unlock(&s->cpu_sheaves->lock); 5191 5192 stat_add(s, ALLOC_PCS, batch); 5193 5194 allocated += batch; 5195 5196 if (batch < size) { 5197 p += batch; 5198 size -= batch; 5199 goto next_batch; 5200 } 5201 5202 return allocated; 5203 } 5204 5205 5206 /* 5207 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 5208 * have the fastpath folded into their functions. So no function call 5209 * overhead for requests that can be satisfied on the fastpath. 5210 * 5211 * The fastpath works by first checking if the lockless freelist can be used. 5212 * If not then __slab_alloc is called for slow processing. 5213 * 5214 * Otherwise we can simply pick the next object from the lockless free list. 5215 */ 5216 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, 5217 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 5218 { 5219 void *object; 5220 bool init = false; 5221 5222 s = slab_pre_alloc_hook(s, gfpflags); 5223 if (unlikely(!s)) 5224 return NULL; 5225 5226 object = kfence_alloc(s, orig_size, gfpflags); 5227 if (unlikely(object)) 5228 goto out; 5229 5230 if (s->cpu_sheaves) 5231 object = alloc_from_pcs(s, gfpflags, node); 5232 5233 if (!object) 5234 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); 5235 5236 maybe_wipe_obj_freeptr(s, object); 5237 init = slab_want_init_on_alloc(gfpflags, s); 5238 5239 out: 5240 /* 5241 * When init equals 'true', like for kzalloc() family, only 5242 * @orig_size bytes might be zeroed instead of s->object_size 5243 * In case this fails due to memcg_slab_post_alloc_hook(), 5244 * object is set to NULL 5245 */ 5246 slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size); 5247 5248 return object; 5249 } 5250 5251 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags) 5252 { 5253 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, 5254 s->object_size); 5255 5256 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 5257 5258 return ret; 5259 } 5260 EXPORT_SYMBOL(kmem_cache_alloc_noprof); 5261 5262 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, 5263 gfp_t gfpflags) 5264 { 5265 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, 5266 s->object_size); 5267 5268 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 5269 5270 return ret; 5271 } 5272 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof); 5273 5274 bool kmem_cache_charge(void *objp, gfp_t gfpflags) 5275 { 5276 if (!memcg_kmem_online()) 5277 return true; 5278 5279 return memcg_slab_post_charge(objp, gfpflags); 5280 } 5281 EXPORT_SYMBOL(kmem_cache_charge); 5282 5283 /** 5284 * kmem_cache_alloc_node - Allocate an object on the specified node 5285 * @s: The cache to allocate from. 5286 * @gfpflags: See kmalloc(). 5287 * @node: node number of the target node. 5288 * 5289 * Identical to kmem_cache_alloc but it will allocate memory on the given 5290 * node, which can improve the performance for cpu bound structures. 5291 * 5292 * Fallback to other node is possible if __GFP_THISNODE is not set. 
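 *
 * Minimal usage sketch (hypothetical cache and node, not taken from an
 * in-tree caller); callers normally go through the kmem_cache_alloc_node()
 * wrapper rather than the _noprof variant directly:
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
 *	if (!f)
 *		return -ENOMEM;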
5293 * 5294 * Return: pointer to the new object or %NULL in case of error 5295 */ 5296 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node) 5297 { 5298 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); 5299 5300 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); 5301 5302 return ret; 5303 } 5304 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof); 5305 5306 /* 5307 * returns a sheaf that has at least the requested size 5308 * when prefilling is needed, do so with given gfp flags 5309 * 5310 * return NULL if sheaf allocation or prefilling failed 5311 */ 5312 struct slab_sheaf * 5313 kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size) 5314 { 5315 struct slub_percpu_sheaves *pcs; 5316 struct slab_sheaf *sheaf = NULL; 5317 5318 if (unlikely(size > s->sheaf_capacity)) { 5319 5320 /* 5321 * slab_debug disables cpu sheaves intentionally so all 5322 * prefilled sheaves become "oversize" and we give up on 5323 * performance for the debugging. Same with SLUB_TINY. 5324 * Creating a cache without sheaves and then requesting a 5325 * prefilled sheaf is however not expected, so warn. 5326 */ 5327 WARN_ON_ONCE(s->sheaf_capacity == 0 && 5328 !IS_ENABLED(CONFIG_SLUB_TINY) && 5329 !(s->flags & SLAB_DEBUG_FLAGS)); 5330 5331 sheaf = kzalloc(struct_size(sheaf, objects, size), gfp); 5332 if (!sheaf) 5333 return NULL; 5334 5335 stat(s, SHEAF_PREFILL_OVERSIZE); 5336 sheaf->cache = s; 5337 sheaf->capacity = size; 5338 5339 if (!__kmem_cache_alloc_bulk(s, gfp, size, 5340 &sheaf->objects[0])) { 5341 kfree(sheaf); 5342 return NULL; 5343 } 5344 5345 sheaf->size = size; 5346 5347 return sheaf; 5348 } 5349 5350 local_lock(&s->cpu_sheaves->lock); 5351 pcs = this_cpu_ptr(s->cpu_sheaves); 5352 5353 if (pcs->spare) { 5354 sheaf = pcs->spare; 5355 pcs->spare = NULL; 5356 stat(s, SHEAF_PREFILL_FAST); 5357 } else { 5358 stat(s, SHEAF_PREFILL_SLOW); 5359 sheaf = barn_get_full_or_empty_sheaf(get_barn(s)); 5360 if (sheaf && sheaf->size) 5361 stat(s, BARN_GET); 5362 else 5363 stat(s, BARN_GET_FAIL); 5364 } 5365 5366 local_unlock(&s->cpu_sheaves->lock); 5367 5368 5369 if (!sheaf) 5370 sheaf = alloc_empty_sheaf(s, gfp); 5371 5372 if (sheaf && sheaf->size < size) { 5373 if (refill_sheaf(s, sheaf, gfp)) { 5374 sheaf_flush_unused(s, sheaf); 5375 free_empty_sheaf(s, sheaf); 5376 sheaf = NULL; 5377 } 5378 } 5379 5380 if (sheaf) 5381 sheaf->capacity = s->sheaf_capacity; 5382 5383 return sheaf; 5384 } 5385 5386 /* 5387 * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf() 5388 * 5389 * If the sheaf cannot simply become the percpu spare sheaf, but there's space 5390 * for a full sheaf in the barn, we try to refill the sheaf back to the cache's 5391 * sheaf_capacity to avoid handling partially full sheaves. 5392 * 5393 * If the refill fails because gfp is e.g. GFP_NOWAIT, or the barn is full, the 5394 * sheaf is instead flushed and freed. 
*/ 5396 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp, 5397 struct slab_sheaf *sheaf) 5398 { 5399 struct slub_percpu_sheaves *pcs; 5400 struct node_barn *barn; 5401 5402 if (unlikely(sheaf->capacity != s->sheaf_capacity)) { 5403 sheaf_flush_unused(s, sheaf); 5404 kfree(sheaf); 5405 return; 5406 } 5407 5408 local_lock(&s->cpu_sheaves->lock); 5409 pcs = this_cpu_ptr(s->cpu_sheaves); 5410 barn = get_barn(s); 5411 5412 if (!pcs->spare) { 5413 pcs->spare = sheaf; 5414 sheaf = NULL; 5415 stat(s, SHEAF_RETURN_FAST); 5416 } 5417 5418 local_unlock(&s->cpu_sheaves->lock); 5419 5420 if (!sheaf) 5421 return; 5422 5423 stat(s, SHEAF_RETURN_SLOW); 5424 5425 /* 5426 * If the barn has too many full sheaves or we fail to refill the sheaf, 5427 * simply flush and free it. 5428 */ 5429 if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES || 5430 refill_sheaf(s, sheaf, gfp)) { 5431 sheaf_flush_unused(s, sheaf); 5432 free_empty_sheaf(s, sheaf); 5433 return; 5434 } 5435 5436 barn_put_full_sheaf(barn, sheaf); 5437 stat(s, BARN_PUT); 5438 } 5439 5440 /* 5441 * Refill a sheaf previously returned by kmem_cache_prefill_sheaf() to at least 5442 * the given size. 5443 * 5444 * The sheaf might be replaced by a new one when requesting more than 5445 * s->sheaf_capacity objects. If such replacement is necessary but the refill 5446 * fails (returning -ENOMEM), the existing sheaf is left intact. 5447 * 5448 * In practice we always refill to the sheaf's full capacity. 5449 */ 5450 int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp, 5451 struct slab_sheaf **sheafp, unsigned int size) 5452 { 5453 struct slab_sheaf *sheaf; 5454 5455 /* 5456 * TODO: do we want to support *sheaf == NULL to be equivalent of 5457 * kmem_cache_prefill_sheaf() ? 5458 */ 5459 if (!sheafp || !(*sheafp)) 5460 return -EINVAL; 5461 5462 sheaf = *sheafp; 5463 if (sheaf->size >= size) 5464 return 0; 5465 5466 if (likely(sheaf->capacity >= size)) { 5467 if (likely(sheaf->capacity == s->sheaf_capacity)) 5468 return refill_sheaf(s, sheaf, gfp); 5469 5470 if (!__kmem_cache_alloc_bulk(s, gfp, sheaf->capacity - sheaf->size, 5471 &sheaf->objects[sheaf->size])) { 5472 return -ENOMEM; 5473 } 5474 sheaf->size = sheaf->capacity; 5475 5476 return 0; 5477 } 5478 5479 /* 5480 * We had a regular sized sheaf and need an oversize one, or we had an 5481 * oversize one already but need a larger one now. 5482 * This should be a very rare path so let's not complicate it. 5483 */ 5484 sheaf = kmem_cache_prefill_sheaf(s, gfp, size); 5485 if (!sheaf) 5486 return -ENOMEM; 5487 5488 kmem_cache_return_sheaf(s, gfp, *sheafp); 5489 *sheafp = sheaf; 5490 return 0; 5491 } 5492 5493 /* 5494 * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf() 5495 * 5496 * Guaranteed not to fail for at least as many allocations as the requested 5497 * prefill size. After the sheaf is emptied, it fails - there is no fallback to the slab cache itself. 5498 * 5499 * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT; 5500 * memcg charging is forced over the limit if necessary, to avoid failure.
5501 */ 5502 void * 5503 kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp, 5504 struct slab_sheaf *sheaf) 5505 { 5506 void *ret = NULL; 5507 bool init; 5508 5509 if (sheaf->size == 0) 5510 goto out; 5511 5512 ret = sheaf->objects[--sheaf->size]; 5513 5514 init = slab_want_init_on_alloc(gfp, s); 5515 5516 /* add __GFP_NOFAIL to force successful memcg charging */ 5517 slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size); 5518 out: 5519 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE); 5520 5521 return ret; 5522 } 5523 5524 unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf) 5525 { 5526 return sheaf->size; 5527 } 5528 /* 5529 * To avoid unnecessary overhead, we pass through large allocation requests 5530 * directly to the page allocator. We use __GFP_COMP, because we will need to 5531 * know the allocation order to free the pages properly in kfree. 5532 */ 5533 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node) 5534 { 5535 struct folio *folio; 5536 void *ptr = NULL; 5537 unsigned int order = get_order(size); 5538 5539 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 5540 flags = kmalloc_fix_flags(flags); 5541 5542 flags |= __GFP_COMP; 5543 5544 if (node == NUMA_NO_NODE) 5545 folio = (struct folio *)alloc_frozen_pages_noprof(flags, order); 5546 else 5547 folio = (struct folio *)__alloc_frozen_pages_noprof(flags, order, node, NULL); 5548 5549 if (folio) { 5550 ptr = folio_address(folio); 5551 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 5552 PAGE_SIZE << order); 5553 __folio_set_large_kmalloc(folio); 5554 } 5555 5556 ptr = kasan_kmalloc_large(ptr, size, flags); 5557 /* As ptr might get tagged, call kmemleak hook after KASAN. */ 5558 kmemleak_alloc(ptr, size, 1, flags); 5559 kmsan_kmalloc_large(ptr, size, flags); 5560 5561 return ptr; 5562 } 5563 5564 void *__kmalloc_large_noprof(size_t size, gfp_t flags) 5565 { 5566 void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE); 5567 5568 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 5569 flags, NUMA_NO_NODE); 5570 return ret; 5571 } 5572 EXPORT_SYMBOL(__kmalloc_large_noprof); 5573 5574 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) 5575 { 5576 void *ret = ___kmalloc_large_node(size, flags, node); 5577 5578 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 5579 flags, node); 5580 return ret; 5581 } 5582 EXPORT_SYMBOL(__kmalloc_large_node_noprof); 5583 5584 static __always_inline 5585 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node, 5586 unsigned long caller) 5587 { 5588 struct kmem_cache *s; 5589 void *ret; 5590 5591 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 5592 ret = __kmalloc_large_node_noprof(size, flags, node); 5593 trace_kmalloc(caller, ret, size, 5594 PAGE_SIZE << get_order(size), flags, node); 5595 return ret; 5596 } 5597 5598 if (unlikely(!size)) 5599 return ZERO_SIZE_PTR; 5600 5601 s = kmalloc_slab(size, b, flags, caller); 5602 5603 ret = slab_alloc_node(s, NULL, flags, node, caller, size); 5604 ret = kasan_kmalloc(s, ret, size, flags); 5605 trace_kmalloc(caller, ret, size, s->size, flags, node); 5606 return ret; 5607 } 5608 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) 5609 { 5610 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_); 5611 } 5612 EXPORT_SYMBOL(__kmalloc_node_noprof); 5613 5614 void *__kmalloc_noprof(size_t size, gfp_t flags) 5615 { 5616 return __do_kmalloc_node(size, NULL, flags, 
NUMA_NO_NODE, _RET_IP_); 5617 } 5618 EXPORT_SYMBOL(__kmalloc_noprof); 5619 5620 /** 5621 * kmalloc_nolock - Allocate an object of given size from any context. 5622 * @size: size to allocate 5623 * @gfp_flags: GFP flags. Only __GFP_ACCOUNT, __GFP_ZERO, __GFP_NO_OBJ_EXT 5624 * allowed. 5625 * @node: node number of the target node. 5626 * 5627 * Return: pointer to the new object or NULL in case of error. 5628 * NULL does not mean EBUSY or EAGAIN. It means ENOMEM. 5629 * There is no reason to call it again and expect !NULL. 5630 */ 5631 void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node) 5632 { 5633 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_NOMEMALLOC | gfp_flags; 5634 struct kmem_cache *s; 5635 bool can_retry = true; 5636 void *ret = ERR_PTR(-EBUSY); 5637 5638 VM_WARN_ON_ONCE(gfp_flags & ~(__GFP_ACCOUNT | __GFP_ZERO | 5639 __GFP_NO_OBJ_EXT)); 5640 5641 if (unlikely(!size)) 5642 return ZERO_SIZE_PTR; 5643 5644 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq())) 5645 /* kmalloc_nolock() in PREEMPT_RT is not supported from irq */ 5646 return NULL; 5647 retry: 5648 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 5649 return NULL; 5650 s = kmalloc_slab(size, NULL, alloc_gfp, _RET_IP_); 5651 5652 if (!(s->flags & __CMPXCHG_DOUBLE) && !kmem_cache_debug(s)) 5653 /* 5654 * kmalloc_nolock() is not supported on architectures that 5655 * don't implement cmpxchg16b, but debug caches don't use 5656 * per-cpu slab and per-cpu partial slabs. They rely on 5657 * kmem_cache_node->list_lock, so kmalloc_nolock() can 5658 * attempt to allocate from debug caches by 5659 * spin_trylock_irqsave(&n->list_lock, ...) 5660 */ 5661 return NULL; 5662 5663 /* 5664 * Do not call slab_alloc_node(), since trylock mode isn't 5665 * compatible with slab_pre_alloc_hook/should_failslab and 5666 * kfence_alloc. Hence call __slab_alloc_node() (at most twice) 5667 * and slab_post_alloc_hook() directly. 5668 * 5669 * In !PREEMPT_RT ___slab_alloc() manipulates (freelist,tid) pair 5670 * in irq saved region. It assumes that the same cpu will not 5671 * __update_cpu_freelist_fast() into the same (freelist,tid) pair. 5672 * Therefore use in_nmi() to check whether particular bucket is in 5673 * irq protected section. 5674 * 5675 * If in_nmi() && local_lock_is_locked(s->cpu_slab) then it means that 5676 * this cpu was interrupted somewhere inside ___slab_alloc() after 5677 * it did local_lock_irqsave(&s->cpu_slab->lock, flags). 5678 * In this case fast path with __update_cpu_freelist_fast() is not safe. 5679 */ 5680 #ifndef CONFIG_SLUB_TINY 5681 if (!in_nmi() || !local_lock_is_locked(&s->cpu_slab->lock)) 5682 #endif 5683 ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size); 5684 5685 if (PTR_ERR(ret) == -EBUSY) { 5686 if (can_retry) { 5687 /* pick the next kmalloc bucket */ 5688 size = s->object_size + 1; 5689 /* 5690 * Another alternative is to 5691 * if (memcg) alloc_gfp &= ~__GFP_ACCOUNT; 5692 * else if (!memcg) alloc_gfp |= __GFP_ACCOUNT; 5693 * to retry from bucket of the same size. 
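 *
 * For example (bucket sizes depend on the configuration): a 64-byte
 * request that found the kmalloc-64 cpu_slab lock busy retries with
 * size = 64 + 1 and is thus served from the next bucket, e.g.
 * kmalloc-96.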
5694 */ 5695 can_retry = false; 5696 goto retry; 5697 } 5698 ret = NULL; 5699 } 5700 5701 maybe_wipe_obj_freeptr(s, ret); 5702 slab_post_alloc_hook(s, NULL, alloc_gfp, 1, &ret, 5703 slab_want_init_on_alloc(alloc_gfp, s), size); 5704 5705 ret = kasan_kmalloc(s, ret, size, alloc_gfp); 5706 return ret; 5707 } 5708 EXPORT_SYMBOL_GPL(kmalloc_nolock_noprof); 5709 5710 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, 5711 int node, unsigned long caller) 5712 { 5713 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller); 5714 5715 } 5716 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof); 5717 5718 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size) 5719 { 5720 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, 5721 _RET_IP_, size); 5722 5723 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); 5724 5725 ret = kasan_kmalloc(s, ret, size, gfpflags); 5726 return ret; 5727 } 5728 EXPORT_SYMBOL(__kmalloc_cache_noprof); 5729 5730 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags, 5731 int node, size_t size) 5732 { 5733 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); 5734 5735 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); 5736 5737 ret = kasan_kmalloc(s, ret, size, gfpflags); 5738 return ret; 5739 } 5740 EXPORT_SYMBOL(__kmalloc_cache_node_noprof); 5741 5742 static noinline void free_to_partial_list( 5743 struct kmem_cache *s, struct slab *slab, 5744 void *head, void *tail, int bulk_cnt, 5745 unsigned long addr) 5746 { 5747 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 5748 struct slab *slab_free = NULL; 5749 int cnt = bulk_cnt; 5750 unsigned long flags; 5751 depot_stack_handle_t handle = 0; 5752 5753 /* 5754 * We cannot use GFP_NOWAIT as there are callsites where waking up 5755 * kswapd could deadlock 5756 */ 5757 if (s->flags & SLAB_STORE_USER) 5758 handle = set_track_prepare(__GFP_NOWARN); 5759 5760 spin_lock_irqsave(&n->list_lock, flags); 5761 5762 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { 5763 void *prior = slab->freelist; 5764 5765 /* Perform the actual freeing while we still hold the locks */ 5766 slab->inuse -= cnt; 5767 set_freepointer(s, tail, prior); 5768 slab->freelist = head; 5769 5770 /* 5771 * If the slab is empty, and node's partial list is full, 5772 * it should be discarded anyway no matter it's on full or 5773 * partial list. 5774 */ 5775 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) 5776 slab_free = slab; 5777 5778 if (!prior) { 5779 /* was on full list */ 5780 remove_full(s, n, slab); 5781 if (!slab_free) { 5782 add_partial(n, slab, DEACTIVATE_TO_TAIL); 5783 stat(s, FREE_ADD_PARTIAL); 5784 } 5785 } else if (slab_free) { 5786 remove_partial(n, slab); 5787 stat(s, FREE_REMOVE_PARTIAL); 5788 } 5789 } 5790 5791 if (slab_free) { 5792 /* 5793 * Update the counters while still holding n->list_lock to 5794 * prevent spurious validation warnings 5795 */ 5796 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); 5797 } 5798 5799 spin_unlock_irqrestore(&n->list_lock, flags); 5800 5801 if (slab_free) { 5802 stat(s, FREE_SLAB); 5803 free_slab(s, slab_free); 5804 } 5805 } 5806 5807 /* 5808 * Slow path handling. This may still be called frequently since objects 5809 * have a longer lifetime than the cpu slabs in most processing loads. 5810 * 5811 * So we still attempt to reduce cache line usage. Just take the slab 5812 * lock and free the item. 
If there is no additional partial slab 5813 * handling required then we can return immediately. 5814 */ 5815 static void __slab_free(struct kmem_cache *s, struct slab *slab, 5816 void *head, void *tail, int cnt, 5817 unsigned long addr) 5818 5819 { 5820 void *prior; 5821 int was_frozen; 5822 struct slab new; 5823 unsigned long counters; 5824 struct kmem_cache_node *n = NULL; 5825 unsigned long flags; 5826 bool on_node_partial; 5827 5828 stat(s, FREE_SLOWPATH); 5829 5830 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 5831 free_to_partial_list(s, slab, head, tail, cnt, addr); 5832 return; 5833 } 5834 5835 do { 5836 if (unlikely(n)) { 5837 spin_unlock_irqrestore(&n->list_lock, flags); 5838 n = NULL; 5839 } 5840 prior = slab->freelist; 5841 counters = slab->counters; 5842 set_freepointer(s, tail, prior); 5843 new.counters = counters; 5844 was_frozen = new.frozen; 5845 new.inuse -= cnt; 5846 if ((!new.inuse || !prior) && !was_frozen) { 5847 /* Needs to be taken off a list */ 5848 if (!kmem_cache_has_cpu_partial(s) || prior) { 5849 5850 n = get_node(s, slab_nid(slab)); 5851 /* 5852 * Speculatively acquire the list_lock. 5853 * If the cmpxchg does not succeed then we may 5854 * drop the list_lock without any processing. 5855 * 5856 * Otherwise the list_lock will synchronize with 5857 * other processors updating the list of slabs. 5858 */ 5859 spin_lock_irqsave(&n->list_lock, flags); 5860 5861 on_node_partial = slab_test_node_partial(slab); 5862 } 5863 } 5864 5865 } while (!slab_update_freelist(s, slab, 5866 prior, counters, 5867 head, new.counters, 5868 "__slab_free")); 5869 5870 if (likely(!n)) { 5871 5872 if (likely(was_frozen)) { 5873 /* 5874 * The list lock was not taken, therefore no list 5875 * activity is necessary. 5876 */ 5877 stat(s, FREE_FROZEN); 5878 } else if (kmem_cache_has_cpu_partial(s) && !prior) { 5879 /* 5880 * If we started with a full slab then put it onto the 5881 * per cpu partial list. 5882 */ 5883 put_cpu_partial(s, slab, 1); 5884 stat(s, CPU_PARTIAL_FREE); 5885 } 5886 5887 return; 5888 } 5889 5890 /* 5891 * This slab was partially empty but not on the per-node partial list, 5892 * in which case we shouldn't manipulate its list, just return. 5893 */ 5894 if (prior && !on_node_partial) { 5895 spin_unlock_irqrestore(&n->list_lock, flags); 5896 return; 5897 } 5898 5899 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 5900 goto slab_empty; 5901 5902 /* 5903 * Objects left in the slab. If it was not on the partial list before 5904 * then add it. 5905 */ 5906 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 5907 add_partial(n, slab, DEACTIVATE_TO_TAIL); 5908 stat(s, FREE_ADD_PARTIAL); 5909 } 5910 spin_unlock_irqrestore(&n->list_lock, flags); 5911 return; 5912 5913 slab_empty: 5914 if (prior) { 5915 /* 5916 * Slab on the partial list. 5917 */ 5918 remove_partial(n, slab); 5919 stat(s, FREE_REMOVE_PARTIAL); 5920 } 5921 5922 spin_unlock_irqrestore(&n->list_lock, flags); 5923 stat(s, FREE_SLAB); 5924 discard_slab(s, slab); 5925 } 5926 5927 /* 5928 * pcs is locked. We should have gotten rid of the spare sheaf and obtained an 5929 * empty sheaf, while the main sheaf is full. We want to install the empty sheaf 5930 * as the main sheaf, and make the current main sheaf the spare sheaf. 5931 * 5932 * However, due to having relinquished the cpu_sheaves lock when obtaining 5933 * the empty sheaf, we need to handle some unlikely but possible cases.
5934 * 5935 * If we put any sheaf to barn here, it's because we were interrupted or have 5936 * been migrated to a different cpu, which should be rare enough so just ignore 5937 * the barn's limits to simplify the handling. 5938 * 5939 * An alternative scenario that gets us here is when we fail 5940 * barn_replace_full_sheaf(), because there's no empty sheaf available in the 5941 * barn, so we had to allocate it by alloc_empty_sheaf(). But because we saw the 5942 * limit on full sheaves was not exceeded, we assume it didn't change and just 5943 * put the full sheaf there. 5944 */ 5945 static void __pcs_install_empty_sheaf(struct kmem_cache *s, 5946 struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty) 5947 { 5948 struct node_barn *barn; 5949 5950 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock)); 5951 5952 /* This is what we expect to find if nobody interrupted us. */ 5953 if (likely(!pcs->spare)) { 5954 pcs->spare = pcs->main; 5955 pcs->main = empty; 5956 return; 5957 } 5958 5959 barn = get_barn(s); 5960 5961 /* 5962 * Unlikely because if the main sheaf had space, we would have just 5963 * freed to it. Get rid of our empty sheaf. 5964 */ 5965 if (pcs->main->size < s->sheaf_capacity) { 5966 barn_put_empty_sheaf(barn, empty); 5967 return; 5968 } 5969 5970 /* Also unlikely for the same reason */ 5971 if (pcs->spare->size < s->sheaf_capacity) { 5972 swap(pcs->main, pcs->spare); 5973 barn_put_empty_sheaf(barn, empty); 5974 return; 5975 } 5976 5977 /* 5978 * We probably failed barn_replace_full_sheaf() due to no empty sheaf 5979 * available there, but we allocated one, so finish the job. 5980 */ 5981 barn_put_full_sheaf(barn, pcs->main); 5982 stat(s, BARN_PUT); 5983 pcs->main = empty; 5984 } 5985 5986 /* 5987 * Replace the full main sheaf with a (at least partially) empty sheaf. 5988 * 5989 * Must be called with the cpu_sheaves local lock locked. If successful, returns 5990 * the pcs pointer and the local lock locked (possibly on a different cpu than 5991 * initially called). If not successful, returns NULL and the local lock 5992 * unlocked. 5993 */ 5994 static struct slub_percpu_sheaves * 5995 __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs) 5996 { 5997 struct slab_sheaf *empty; 5998 struct node_barn *barn; 5999 bool put_fail; 6000 6001 restart: 6002 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock)); 6003 6004 barn = get_barn(s); 6005 put_fail = false; 6006 6007 if (!pcs->spare) { 6008 empty = barn_get_empty_sheaf(barn); 6009 if (empty) { 6010 pcs->spare = pcs->main; 6011 pcs->main = empty; 6012 return pcs; 6013 } 6014 goto alloc_empty; 6015 } 6016 6017 if (pcs->spare->size < s->sheaf_capacity) { 6018 swap(pcs->main, pcs->spare); 6019 return pcs; 6020 } 6021 6022 empty = barn_replace_full_sheaf(barn, pcs->main); 6023 6024 if (!IS_ERR(empty)) { 6025 stat(s, BARN_PUT); 6026 pcs->main = empty; 6027 return pcs; 6028 } 6029 6030 if (PTR_ERR(empty) == -E2BIG) { 6031 /* Since we got here, spare exists and is full */ 6032 struct slab_sheaf *to_flush = pcs->spare; 6033 6034 stat(s, BARN_PUT_FAIL); 6035 6036 pcs->spare = NULL; 6037 local_unlock(&s->cpu_sheaves->lock); 6038 6039 sheaf_flush_unused(s, to_flush); 6040 empty = to_flush; 6041 goto got_empty; 6042 } 6043 6044 /* 6045 * We could not replace full sheaf because barn had no empty 6046 * sheaves. We can still allocate it and put the full sheaf in 6047 * __pcs_install_empty_sheaf(), but if we fail to allocate it, 6048 * make sure to count the fail. 
6049 */ 6050 put_fail = true; 6051 6052 alloc_empty: 6053 local_unlock(&s->cpu_sheaves->lock); 6054 6055 empty = alloc_empty_sheaf(s, GFP_NOWAIT); 6056 if (empty) 6057 goto got_empty; 6058 6059 if (put_fail) 6060 stat(s, BARN_PUT_FAIL); 6061 6062 if (!sheaf_flush_main(s)) 6063 return NULL; 6064 6065 if (!local_trylock(&s->cpu_sheaves->lock)) 6066 return NULL; 6067 6068 pcs = this_cpu_ptr(s->cpu_sheaves); 6069 6070 /* 6071 * we flushed the main sheaf so it should be empty now, 6072 * but in case we got preempted or migrated, we need to 6073 * check again 6074 */ 6075 if (pcs->main->size == s->sheaf_capacity) 6076 goto restart; 6077 6078 return pcs; 6079 6080 got_empty: 6081 if (!local_trylock(&s->cpu_sheaves->lock)) { 6082 barn_put_empty_sheaf(barn, empty); 6083 return NULL; 6084 } 6085 6086 pcs = this_cpu_ptr(s->cpu_sheaves); 6087 __pcs_install_empty_sheaf(s, pcs, empty); 6088 6089 return pcs; 6090 } 6091 6092 /* 6093 * Free an object to the percpu sheaves. 6094 * The object is expected to have passed slab_free_hook() already. 6095 */ 6096 static __fastpath_inline 6097 bool free_to_pcs(struct kmem_cache *s, void *object) 6098 { 6099 struct slub_percpu_sheaves *pcs; 6100 6101 if (!local_trylock(&s->cpu_sheaves->lock)) 6102 return false; 6103 6104 pcs = this_cpu_ptr(s->cpu_sheaves); 6105 6106 if (unlikely(pcs->main->size == s->sheaf_capacity)) { 6107 6108 pcs = __pcs_replace_full_main(s, pcs); 6109 if (unlikely(!pcs)) 6110 return false; 6111 } 6112 6113 pcs->main->objects[pcs->main->size++] = object; 6114 6115 local_unlock(&s->cpu_sheaves->lock); 6116 6117 stat(s, FREE_PCS); 6118 6119 return true; 6120 } 6121 6122 static void rcu_free_sheaf(struct rcu_head *head) 6123 { 6124 struct slab_sheaf *sheaf; 6125 struct node_barn *barn; 6126 struct kmem_cache *s; 6127 6128 sheaf = container_of(head, struct slab_sheaf, rcu_head); 6129 6130 s = sheaf->cache; 6131 6132 /* 6133 * This may remove some objects due to slab_free_hook() returning false, 6134 * so that the sheaf might no longer be completely full. But it's easier 6135 * to handle it as full (unless it became completely empty), as the code 6136 * handles it fine. The only downside is that sheaf will serve fewer 6137 * allocations when reused. It only happens due to debugging, which is a 6138 * performance hit anyway. 6139 */ 6140 __rcu_free_sheaf_prepare(s, sheaf); 6141 6142 barn = get_node(s, sheaf->node)->barn; 6143 6144 /* due to slab_free_hook() */ 6145 if (unlikely(sheaf->size == 0)) 6146 goto empty; 6147 6148 /* 6149 * Checking nr_full/nr_empty outside lock avoids contention in case the 6150 * barn is at the respective limit. Due to the race we might go over the 6151 * limit but that should be rare and harmless. 
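 * The data_race() annotations on these lockless reads mark the races as
 * intentional so that KCSAN does not flag them.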
6152 */ 6153 6154 if (data_race(barn->nr_full) < MAX_FULL_SHEAVES) { 6155 stat(s, BARN_PUT); 6156 barn_put_full_sheaf(barn, sheaf); 6157 return; 6158 } 6159 6160 stat(s, BARN_PUT_FAIL); 6161 sheaf_flush_unused(s, sheaf); 6162 6163 empty: 6164 if (data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) { 6165 barn_put_empty_sheaf(barn, sheaf); 6166 return; 6167 } 6168 6169 free_empty_sheaf(s, sheaf); 6170 } 6171 6172 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj) 6173 { 6174 struct slub_percpu_sheaves *pcs; 6175 struct slab_sheaf *rcu_sheaf; 6176 6177 if (!local_trylock(&s->cpu_sheaves->lock)) 6178 goto fail; 6179 6180 pcs = this_cpu_ptr(s->cpu_sheaves); 6181 6182 if (unlikely(!pcs->rcu_free)) { 6183 6184 struct slab_sheaf *empty; 6185 struct node_barn *barn; 6186 6187 if (pcs->spare && pcs->spare->size == 0) { 6188 pcs->rcu_free = pcs->spare; 6189 pcs->spare = NULL; 6190 goto do_free; 6191 } 6192 6193 barn = get_barn(s); 6194 6195 empty = barn_get_empty_sheaf(barn); 6196 6197 if (empty) { 6198 pcs->rcu_free = empty; 6199 goto do_free; 6200 } 6201 6202 local_unlock(&s->cpu_sheaves->lock); 6203 6204 empty = alloc_empty_sheaf(s, GFP_NOWAIT); 6205 6206 if (!empty) 6207 goto fail; 6208 6209 if (!local_trylock(&s->cpu_sheaves->lock)) { 6210 barn_put_empty_sheaf(barn, empty); 6211 goto fail; 6212 } 6213 6214 pcs = this_cpu_ptr(s->cpu_sheaves); 6215 6216 if (unlikely(pcs->rcu_free)) 6217 barn_put_empty_sheaf(barn, empty); 6218 else 6219 pcs->rcu_free = empty; 6220 } 6221 6222 do_free: 6223 6224 rcu_sheaf = pcs->rcu_free; 6225 6226 /* 6227 * Since we flush immediately when size reaches capacity, we never reach 6228 * this with size already at capacity, so no OOB write is possible. 6229 */ 6230 rcu_sheaf->objects[rcu_sheaf->size++] = obj; 6231 6232 if (likely(rcu_sheaf->size < s->sheaf_capacity)) { 6233 rcu_sheaf = NULL; 6234 } else { 6235 pcs->rcu_free = NULL; 6236 rcu_sheaf->node = numa_mem_id(); 6237 } 6238 6239 /* 6240 * we flush before local_unlock to make sure a racing 6241 * flush_all_rcu_sheaves() doesn't miss this sheaf 6242 */ 6243 if (rcu_sheaf) 6244 call_rcu(&rcu_sheaf->rcu_head, rcu_free_sheaf); 6245 6246 local_unlock(&s->cpu_sheaves->lock); 6247 6248 stat(s, FREE_RCU_SHEAF); 6249 return true; 6250 6251 fail: 6252 stat(s, FREE_RCU_SHEAF_FAIL); 6253 return false; 6254 } 6255 6256 /* 6257 * Bulk free objects to the percpu sheaves. 6258 * Unlike free_to_pcs() this includes the calls to all necessary hooks 6259 * and the fallback to freeing to slab pages. 
6260 */ 6261 static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p) 6262 { 6263 struct slub_percpu_sheaves *pcs; 6264 struct slab_sheaf *main, *empty; 6265 bool init = slab_want_init_on_free(s); 6266 unsigned int batch, i = 0; 6267 struct node_barn *barn; 6268 void *remote_objects[PCS_BATCH_MAX]; 6269 unsigned int remote_nr = 0; 6270 int node = numa_mem_id(); 6271 6272 next_remote_batch: 6273 while (i < size) { 6274 struct slab *slab = virt_to_slab(p[i]); 6275 6276 memcg_slab_free_hook(s, slab, p + i, 1); 6277 alloc_tagging_slab_free_hook(s, slab, p + i, 1); 6278 6279 if (unlikely(!slab_free_hook(s, p[i], init, false))) { 6280 p[i] = p[--size]; 6281 if (!size) 6282 goto flush_remote; 6283 continue; 6284 } 6285 6286 if (unlikely(IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)) { 6287 remote_objects[remote_nr] = p[i]; 6288 p[i] = p[--size]; 6289 if (++remote_nr >= PCS_BATCH_MAX) 6290 goto flush_remote; 6291 continue; 6292 } 6293 6294 i++; 6295 } 6296 6297 next_batch: 6298 if (!local_trylock(&s->cpu_sheaves->lock)) 6299 goto fallback; 6300 6301 pcs = this_cpu_ptr(s->cpu_sheaves); 6302 6303 if (likely(pcs->main->size < s->sheaf_capacity)) 6304 goto do_free; 6305 6306 barn = get_barn(s); 6307 6308 if (!pcs->spare) { 6309 empty = barn_get_empty_sheaf(barn); 6310 if (!empty) 6311 goto no_empty; 6312 6313 pcs->spare = pcs->main; 6314 pcs->main = empty; 6315 goto do_free; 6316 } 6317 6318 if (pcs->spare->size < s->sheaf_capacity) { 6319 swap(pcs->main, pcs->spare); 6320 goto do_free; 6321 } 6322 6323 empty = barn_replace_full_sheaf(barn, pcs->main); 6324 if (IS_ERR(empty)) { 6325 stat(s, BARN_PUT_FAIL); 6326 goto no_empty; 6327 } 6328 6329 stat(s, BARN_PUT); 6330 pcs->main = empty; 6331 6332 do_free: 6333 main = pcs->main; 6334 batch = min(size, s->sheaf_capacity - main->size); 6335 6336 memcpy(main->objects + main->size, p, batch * sizeof(void *)); 6337 main->size += batch; 6338 6339 local_unlock(&s->cpu_sheaves->lock); 6340 6341 stat_add(s, FREE_PCS, batch); 6342 6343 if (batch < size) { 6344 p += batch; 6345 size -= batch; 6346 goto next_batch; 6347 } 6348 6349 return; 6350 6351 no_empty: 6352 local_unlock(&s->cpu_sheaves->lock); 6353 6354 /* 6355 * if we depleted all empty sheaves in the barn or there are too 6356 * many full sheaves, free the rest to slab pages 6357 */ 6358 fallback: 6359 __kmem_cache_free_bulk(s, size, p); 6360 6361 flush_remote: 6362 if (remote_nr) { 6363 __kmem_cache_free_bulk(s, remote_nr, &remote_objects[0]); 6364 if (i < size) { 6365 remote_nr = 0; 6366 goto next_remote_batch; 6367 } 6368 } 6369 } 6370 6371 struct defer_free { 6372 struct llist_head objects; 6373 struct llist_head slabs; 6374 struct irq_work work; 6375 }; 6376 6377 static void free_deferred_objects(struct irq_work *work); 6378 6379 static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = { 6380 .objects = LLIST_HEAD_INIT(objects), 6381 .slabs = LLIST_HEAD_INIT(slabs), 6382 .work = IRQ_WORK_INIT(free_deferred_objects), 6383 }; 6384 6385 /* 6386 * In PREEMPT_RT irq_work runs in per-cpu kthread, so it's safe 6387 * to take sleeping spin_locks from __slab_free() and deactivate_slab(). 6388 * In !PREEMPT_RT irq_work will run after local_unlock_irqrestore(). 
6389 */ 6390 static void free_deferred_objects(struct irq_work *work) 6391 { 6392 struct defer_free *df = container_of(work, struct defer_free, work); 6393 struct llist_head *objs = &df->objects; 6394 struct llist_head *slabs = &df->slabs; 6395 struct llist_node *llnode, *pos, *t; 6396 6397 if (llist_empty(objs) && llist_empty(slabs)) 6398 return; 6399 6400 llnode = llist_del_all(objs); 6401 llist_for_each_safe(pos, t, llnode) { 6402 struct kmem_cache *s; 6403 struct slab *slab; 6404 void *x = pos; 6405 6406 slab = virt_to_slab(x); 6407 s = slab->slab_cache; 6408 6409 /* 6410 * We used freepointer in 'x' to link 'x' into df->objects. 6411 * Clear it to NULL to avoid false positive detection 6412 * of "Freepointer corruption". 6413 */ 6414 *(void **)x = NULL; 6415 6416 /* Point 'x' back to the beginning of allocated object */ 6417 x -= s->offset; 6418 __slab_free(s, slab, x, x, 1, _THIS_IP_); 6419 } 6420 6421 llnode = llist_del_all(slabs); 6422 llist_for_each_safe(pos, t, llnode) { 6423 struct slab *slab = container_of(pos, struct slab, llnode); 6424 6425 #ifdef CONFIG_SLUB_TINY 6426 discard_slab(slab->slab_cache, slab); 6427 #else 6428 deactivate_slab(slab->slab_cache, slab, slab->flush_freelist); 6429 #endif 6430 } 6431 } 6432 6433 static void defer_free(struct kmem_cache *s, void *head) 6434 { 6435 struct defer_free *df; 6436 6437 guard(preempt)(); 6438 6439 df = this_cpu_ptr(&defer_free_objects); 6440 if (llist_add(head + s->offset, &df->objects)) 6441 irq_work_queue(&df->work); 6442 } 6443 6444 static void defer_deactivate_slab(struct slab *slab, void *flush_freelist) 6445 { 6446 struct defer_free *df; 6447 6448 slab->flush_freelist = flush_freelist; 6449 6450 guard(preempt)(); 6451 6452 df = this_cpu_ptr(&defer_free_objects); 6453 if (llist_add(&slab->llnode, &df->slabs)) 6454 irq_work_queue(&df->work); 6455 } 6456 6457 void defer_free_barrier(void) 6458 { 6459 int cpu; 6460 6461 for_each_possible_cpu(cpu) 6462 irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work); 6463 } 6464 6465 #ifndef CONFIG_SLUB_TINY 6466 /* 6467 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 6468 * can perform fastpath freeing without additional function calls. 6469 * 6470 * The fastpath is only possible if we are freeing to the current cpu slab 6471 * of this processor. This typically the case if we have just allocated 6472 * the item before. 6473 * 6474 * If fastpath is not possible then fall back to __slab_free where we deal 6475 * with all sorts of special processing. 6476 * 6477 * Bulk free of a freelist with several objects (all pointing to the 6478 * same slab) possible by specifying head and tail ptr, plus objects 6479 * count (cnt). Bulk free indicated by tail pointer being set. 6480 */ 6481 static __always_inline void do_slab_free(struct kmem_cache *s, 6482 struct slab *slab, void *head, void *tail, 6483 int cnt, unsigned long addr) 6484 { 6485 /* cnt == 0 signals that it's called from kfree_nolock() */ 6486 bool allow_spin = cnt; 6487 struct kmem_cache_cpu *c; 6488 unsigned long tid; 6489 void **freelist; 6490 6491 redo: 6492 /* 6493 * Determine the currently cpus per cpu slab. 6494 * The cpu may change afterward. However that does not matter since 6495 * data is retrieved via this pointer. If we are on the same cpu 6496 * during the cmpxchg then the free will succeed. 
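 * A stale tid in the cmpxchg below is what detects that we were preempted or
 * migrated to another cpu in the meantime, in which case we simply retry.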
6497 */ 6498 c = raw_cpu_ptr(s->cpu_slab); 6499 tid = READ_ONCE(c->tid); 6500 6501 /* Same with comment on barrier() in __slab_alloc_node() */ 6502 barrier(); 6503 6504 if (unlikely(slab != c->slab)) { 6505 if (unlikely(!allow_spin)) { 6506 /* 6507 * __slab_free() can locklessly cmpxchg16 into a slab, 6508 * but then it might need to take spin_lock or local_lock 6509 * in put_cpu_partial() for further processing. 6510 * Avoid the complexity and simply add to a deferred list. 6511 */ 6512 defer_free(s, head); 6513 } else { 6514 __slab_free(s, slab, head, tail, cnt, addr); 6515 } 6516 return; 6517 } 6518 6519 if (unlikely(!allow_spin)) { 6520 if ((in_nmi() || !USE_LOCKLESS_FAST_PATH()) && 6521 local_lock_is_locked(&s->cpu_slab->lock)) { 6522 defer_free(s, head); 6523 return; 6524 } 6525 cnt = 1; /* restore cnt. kfree_nolock() frees one object at a time */ 6526 } 6527 6528 if (USE_LOCKLESS_FAST_PATH()) { 6529 freelist = READ_ONCE(c->freelist); 6530 6531 set_freepointer(s, tail, freelist); 6532 6533 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { 6534 note_cmpxchg_failure("slab_free", s, tid); 6535 goto redo; 6536 } 6537 } else { 6538 __maybe_unused unsigned long flags = 0; 6539 6540 /* Update the free list under the local lock */ 6541 local_lock_cpu_slab(s, flags); 6542 c = this_cpu_ptr(s->cpu_slab); 6543 if (unlikely(slab != c->slab)) { 6544 local_unlock_cpu_slab(s, flags); 6545 goto redo; 6546 } 6547 tid = c->tid; 6548 freelist = c->freelist; 6549 6550 set_freepointer(s, tail, freelist); 6551 c->freelist = head; 6552 c->tid = next_tid(tid); 6553 6554 local_unlock_cpu_slab(s, flags); 6555 } 6556 stat_add(s, FREE_FASTPATH, cnt); 6557 } 6558 #else /* CONFIG_SLUB_TINY */ 6559 static void do_slab_free(struct kmem_cache *s, 6560 struct slab *slab, void *head, void *tail, 6561 int cnt, unsigned long addr) 6562 { 6563 __slab_free(s, slab, head, tail, cnt, addr); 6564 } 6565 #endif /* CONFIG_SLUB_TINY */ 6566 6567 static __fastpath_inline 6568 void slab_free(struct kmem_cache *s, struct slab *slab, void *object, 6569 unsigned long addr) 6570 { 6571 memcg_slab_free_hook(s, slab, &object, 1); 6572 alloc_tagging_slab_free_hook(s, slab, &object, 1); 6573 6574 if (unlikely(!slab_free_hook(s, object, slab_want_init_on_free(s), false))) 6575 return; 6576 6577 if (s->cpu_sheaves && likely(!IS_ENABLED(CONFIG_NUMA) || 6578 slab_nid(slab) == numa_mem_id())) { 6579 if (likely(free_to_pcs(s, object))) 6580 return; 6581 } 6582 6583 do_slab_free(s, slab, object, object, 1, addr); 6584 } 6585 6586 #ifdef CONFIG_MEMCG 6587 /* Do not inline the rare memcg charging failed path into the allocation path */ 6588 static noinline 6589 void memcg_alloc_abort_single(struct kmem_cache *s, void *object) 6590 { 6591 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false))) 6592 do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_); 6593 } 6594 #endif 6595 6596 static __fastpath_inline 6597 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, 6598 void *tail, void **p, int cnt, unsigned long addr) 6599 { 6600 memcg_slab_free_hook(s, slab, p, cnt); 6601 alloc_tagging_slab_free_hook(s, slab, p, cnt); 6602 /* 6603 * With KASAN enabled slab_free_freelist_hook modifies the freelist 6604 * to remove objects, whose reuse must be delayed. 
6605 */ 6606 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) 6607 do_slab_free(s, slab, head, tail, cnt, addr); 6608 } 6609 6610 #ifdef CONFIG_SLUB_RCU_DEBUG 6611 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head) 6612 { 6613 struct rcu_delayed_free *delayed_free = 6614 container_of(rcu_head, struct rcu_delayed_free, head); 6615 void *object = delayed_free->object; 6616 struct slab *slab = virt_to_slab(object); 6617 struct kmem_cache *s; 6618 6619 kfree(delayed_free); 6620 6621 if (WARN_ON(is_kfence_address(object))) 6622 return; 6623 6624 /* find the object and the cache again */ 6625 if (WARN_ON(!slab)) 6626 return; 6627 s = slab->slab_cache; 6628 if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU))) 6629 return; 6630 6631 /* resume freeing */ 6632 if (slab_free_hook(s, object, slab_want_init_on_free(s), true)) 6633 do_slab_free(s, slab, object, object, 1, _THIS_IP_); 6634 } 6635 #endif /* CONFIG_SLUB_RCU_DEBUG */ 6636 6637 #ifdef CONFIG_KASAN_GENERIC 6638 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 6639 { 6640 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr); 6641 } 6642 #endif 6643 6644 static inline struct kmem_cache *virt_to_cache(const void *obj) 6645 { 6646 struct slab *slab; 6647 6648 slab = virt_to_slab(obj); 6649 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) 6650 return NULL; 6651 return slab->slab_cache; 6652 } 6653 6654 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) 6655 { 6656 struct kmem_cache *cachep; 6657 6658 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && 6659 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) 6660 return s; 6661 6662 cachep = virt_to_cache(x); 6663 if (WARN(cachep && cachep != s, 6664 "%s: Wrong slab cache. %s but object is from %s\n", 6665 __func__, s->name, cachep->name)) 6666 print_tracking(cachep, x); 6667 return cachep; 6668 } 6669 6670 /** 6671 * kmem_cache_free - Deallocate an object 6672 * @s: The cache the allocation was from. 6673 * @x: The previously allocated object. 6674 * 6675 * Free an object which was previously allocated from this 6676 * cache. 6677 */ 6678 void kmem_cache_free(struct kmem_cache *s, void *x) 6679 { 6680 s = cache_from_obj(s, x); 6681 if (!s) 6682 return; 6683 trace_kmem_cache_free(_RET_IP_, x, s); 6684 slab_free(s, virt_to_slab(x), x, _RET_IP_); 6685 } 6686 EXPORT_SYMBOL(kmem_cache_free); 6687 6688 static void free_large_kmalloc(struct folio *folio, void *object) 6689 { 6690 unsigned int order = folio_order(folio); 6691 6692 if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) { 6693 dump_page(&folio->page, "Not a kmalloc allocation"); 6694 return; 6695 } 6696 6697 if (WARN_ON_ONCE(order == 0)) 6698 pr_warn_once("object pointer: 0x%p\n", object); 6699 6700 kmemleak_free(object); 6701 kasan_kfree_large(object); 6702 kmsan_kfree_large(object); 6703 6704 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 6705 -(PAGE_SIZE << order)); 6706 __folio_clear_large_kmalloc(folio); 6707 free_frozen_pages(&folio->page, order); 6708 } 6709 6710 /* 6711 * Given an rcu_head embedded within an object obtained from kvmalloc at an 6712 * offset < 4k, free the object in question. 
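 *
 * An illustrative caller-side sketch (the struct is invented for the example)
 * of one way objects can end up here, via the two-argument kvfree_rcu():
 *
 *	struct foo {
 *		struct rcu_head rcu;	// offset 0, well below 4k
 *		long payload;
 *	};
 *
 *	struct foo *p = kvmalloc(sizeof(*p), GFP_KERNEL);
 *	...
 *	kvfree_rcu(p, rcu);	// after a grace period this callback gets &p->rcu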
6713 */ 6714 void kvfree_rcu_cb(struct rcu_head *head) 6715 { 6716 void *obj = head; 6717 struct folio *folio; 6718 struct slab *slab; 6719 struct kmem_cache *s; 6720 void *slab_addr; 6721 6722 if (is_vmalloc_addr(obj)) { 6723 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj); 6724 vfree(obj); 6725 return; 6726 } 6727 6728 folio = virt_to_folio(obj); 6729 if (!folio_test_slab(folio)) { 6730 /* 6731 * rcu_head offset can be only less than page size so no need to 6732 * consider folio order 6733 */ 6734 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj); 6735 free_large_kmalloc(folio, obj); 6736 return; 6737 } 6738 6739 slab = folio_slab(folio); 6740 s = slab->slab_cache; 6741 slab_addr = folio_address(folio); 6742 6743 if (is_kfence_address(obj)) { 6744 obj = kfence_object_start(obj); 6745 } else { 6746 unsigned int idx = __obj_to_index(s, slab_addr, obj); 6747 6748 obj = slab_addr + s->size * idx; 6749 obj = fixup_red_left(s, obj); 6750 } 6751 6752 slab_free(s, slab, obj, _RET_IP_); 6753 } 6754 6755 /** 6756 * kfree - free previously allocated memory 6757 * @object: pointer returned by kmalloc() or kmem_cache_alloc() 6758 * 6759 * If @object is NULL, no operation is performed. 6760 */ 6761 void kfree(const void *object) 6762 { 6763 struct folio *folio; 6764 struct slab *slab; 6765 struct kmem_cache *s; 6766 void *x = (void *)object; 6767 6768 trace_kfree(_RET_IP_, object); 6769 6770 if (unlikely(ZERO_OR_NULL_PTR(object))) 6771 return; 6772 6773 folio = virt_to_folio(object); 6774 if (unlikely(!folio_test_slab(folio))) { 6775 free_large_kmalloc(folio, (void *)object); 6776 return; 6777 } 6778 6779 slab = folio_slab(folio); 6780 s = slab->slab_cache; 6781 slab_free(s, slab, x, _RET_IP_); 6782 } 6783 EXPORT_SYMBOL(kfree); 6784 6785 /* 6786 * Can be called while holding raw_spinlock_t or from IRQ and NMI, 6787 * but ONLY for objects allocated by kmalloc_nolock(). 6788 * Debug checks (like kmemleak and kfence) were skipped on allocation, 6789 * hence 6790 * obj = kmalloc(); kfree_nolock(obj); 6791 * will miss kmemleak/kfence book keeping and will cause false positives. 6792 * large_kmalloc is not supported either. 6793 */ 6794 void kfree_nolock(const void *object) 6795 { 6796 struct folio *folio; 6797 struct slab *slab; 6798 struct kmem_cache *s; 6799 void *x = (void *)object; 6800 6801 if (unlikely(ZERO_OR_NULL_PTR(object))) 6802 return; 6803 6804 folio = virt_to_folio(object); 6805 if (unlikely(!folio_test_slab(folio))) { 6806 WARN_ONCE(1, "large_kmalloc is not supported by kfree_nolock()"); 6807 return; 6808 } 6809 6810 slab = folio_slab(folio); 6811 s = slab->slab_cache; 6812 6813 memcg_slab_free_hook(s, slab, &x, 1); 6814 alloc_tagging_slab_free_hook(s, slab, &x, 1); 6815 /* 6816 * Unlike slab_free() do NOT call the following: 6817 * kmemleak_free_recursive(x, s->flags); 6818 * debug_check_no_locks_freed(x, s->object_size); 6819 * debug_check_no_obj_freed(x, s->object_size); 6820 * __kcsan_check_access(x, s->object_size, ..); 6821 * kfence_free(x); 6822 * since they take spinlocks or not safe from any context. 6823 */ 6824 kmsan_slab_free(s, x); 6825 /* 6826 * If KASAN finds a kernel bug it will do kasan_report_invalid_free() 6827 * which will call raw_spin_lock_irqsave() which is technically 6828 * unsafe from NMI, but take chance and report kernel bug. 6829 * The sequence of 6830 * kasan_report_invalid_free() -> raw_spin_lock_irqsave() -> NMI 6831 * -> kfree_nolock() -> kasan_report_invalid_free() on the same CPU 6832 * is double buggy and deserves to deadlock. 
6833 */ 6834 if (kasan_slab_pre_free(s, x)) 6835 return; 6836 /* 6837 * memcg, kasan_slab_pre_free are done for 'x'. 6838 * The only thing left is kasan_poison without quarantine, 6839 * since kasan quarantine takes locks and not supported from NMI. 6840 */ 6841 kasan_slab_free(s, x, false, false, /* skip quarantine */true); 6842 #ifndef CONFIG_SLUB_TINY 6843 do_slab_free(s, slab, x, x, 0, _RET_IP_); 6844 #else 6845 defer_free(s, x); 6846 #endif 6847 } 6848 EXPORT_SYMBOL_GPL(kfree_nolock); 6849 6850 static __always_inline __realloc_size(2) void * 6851 __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags, int nid) 6852 { 6853 void *ret; 6854 size_t ks = 0; 6855 int orig_size = 0; 6856 struct kmem_cache *s = NULL; 6857 6858 if (unlikely(ZERO_OR_NULL_PTR(p))) 6859 goto alloc_new; 6860 6861 /* Check for double-free. */ 6862 if (!kasan_check_byte(p)) 6863 return NULL; 6864 6865 /* 6866 * If reallocation is not necessary (e. g. the new size is less 6867 * than the current allocated size), the current allocation will be 6868 * preserved unless __GFP_THISNODE is set. In the latter case a new 6869 * allocation on the requested node will be attempted. 6870 */ 6871 if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE && 6872 nid != page_to_nid(virt_to_page(p))) 6873 goto alloc_new; 6874 6875 if (is_kfence_address(p)) { 6876 ks = orig_size = kfence_ksize(p); 6877 } else { 6878 struct folio *folio; 6879 6880 folio = virt_to_folio(p); 6881 if (unlikely(!folio_test_slab(folio))) { 6882 /* Big kmalloc object */ 6883 WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE); 6884 WARN_ON(p != folio_address(folio)); 6885 ks = folio_size(folio); 6886 } else { 6887 s = folio_slab(folio)->slab_cache; 6888 orig_size = get_orig_size(s, (void *)p); 6889 ks = s->object_size; 6890 } 6891 } 6892 6893 /* If the old object doesn't fit, allocate a bigger one */ 6894 if (new_size > ks) 6895 goto alloc_new; 6896 6897 /* If the old object doesn't satisfy the new alignment, allocate a new one */ 6898 if (!IS_ALIGNED((unsigned long)p, align)) 6899 goto alloc_new; 6900 6901 /* Zero out spare memory. */ 6902 if (want_init_on_alloc(flags)) { 6903 kasan_disable_current(); 6904 if (orig_size && orig_size < new_size) 6905 memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size); 6906 else 6907 memset(kasan_reset_tag(p) + new_size, 0, ks - new_size); 6908 kasan_enable_current(); 6909 } 6910 6911 /* Setup kmalloc redzone when needed */ 6912 if (s && slub_debug_orig_size(s)) { 6913 set_orig_size(s, (void *)p, new_size); 6914 if (s->flags & SLAB_RED_ZONE && new_size < ks) 6915 memset_no_sanitize_memory(kasan_reset_tag(p) + new_size, 6916 SLUB_RED_ACTIVE, ks - new_size); 6917 } 6918 6919 p = kasan_krealloc(p, new_size, flags); 6920 return (void *)p; 6921 6922 alloc_new: 6923 ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_); 6924 if (ret && p) { 6925 /* Disable KASAN checks as the object's redzone is accessed. */ 6926 kasan_disable_current(); 6927 memcpy(ret, kasan_reset_tag(p), orig_size ?: ks); 6928 kasan_enable_current(); 6929 } 6930 6931 return ret; 6932 } 6933 6934 /** 6935 * krealloc_node_align - reallocate memory. The contents will remain unchanged. 6936 * @p: object to reallocate memory for. 6937 * @new_size: how many bytes of memory are required. 6938 * @align: desired alignment. 6939 * @flags: the type of memory to allocate. 6940 * @nid: NUMA node or NUMA_NO_NODE 6941 * 6942 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). 
If @new_size 6943 * is 0 and @p is not a %NULL pointer, the object pointed to is freed. 6944 * 6945 * Only alignments up to those guaranteed by kmalloc() will be honored. Please see 6946 * Documentation/core-api/memory-allocation.rst for more details. 6947 * 6948 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the 6949 * initial memory allocation, every subsequent call to this API for the same 6950 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that 6951 * __GFP_ZERO is not fully honored by this API. 6952 * 6953 * When slub_debug_orig_size() is off, krealloc() only knows about the bucket 6954 * size of an allocation (but not the exact size it was allocated with) and 6955 * hence implements the following semantics for shrinking and growing buffers 6956 * with __GFP_ZERO:: 6957 * 6958 * new bucket 6959 * 0 size size 6960 * |--------|----------------| 6961 * | keep | zero | 6962 * 6963 * Otherwise, the original allocation size 'orig_size' could be used to 6964 * precisely clear the requested size, and the new size will also be stored 6965 * as the new 'orig_size'. 6966 * 6967 * In any case, the contents of the object pointed to are preserved up to the 6968 * lesser of the new and old sizes. 6969 * 6970 * Return: pointer to the allocated memory or %NULL in case of error 6971 */ 6972 void *krealloc_node_align_noprof(const void *p, size_t new_size, unsigned long align, 6973 gfp_t flags, int nid) 6974 { 6975 void *ret; 6976 6977 if (unlikely(!new_size)) { 6978 kfree(p); 6979 return ZERO_SIZE_PTR; 6980 } 6981 6982 ret = __do_krealloc(p, new_size, align, flags, nid); 6983 if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret)) 6984 kfree(p); 6985 6986 return ret; 6987 } 6988 EXPORT_SYMBOL(krealloc_node_align_noprof); 6989 6990 static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size) 6991 { 6992 /* 6993 * We want to attempt a large physically contiguous block first because 6994 * it is less likely to fragment multiple larger blocks and therefore 6995 * contribute to a long term fragmentation less than vmalloc fallback. 6996 * However make sure that larger requests are not too disruptive - i.e. 6997 * do not direct reclaim unless physically continuous memory is preferred 6998 * (__GFP_RETRY_MAYFAIL mode). We still kick in kswapd/kcompactd to 6999 * start working in the background 7000 */ 7001 if (size > PAGE_SIZE) { 7002 flags |= __GFP_NOWARN; 7003 7004 if (!(flags & __GFP_RETRY_MAYFAIL)) 7005 flags &= ~__GFP_DIRECT_RECLAIM; 7006 7007 /* nofail semantic is implemented by the vmalloc fallback */ 7008 flags &= ~__GFP_NOFAIL; 7009 } 7010 7011 return flags; 7012 } 7013 7014 /** 7015 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon 7016 * failure, fall back to non-contiguous (vmalloc) allocation. 7017 * @size: size of the request. 7018 * @b: which set of kmalloc buckets to allocate from. 7019 * @align: desired alignment. 7020 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL. 7021 * @node: numa node to allocate from 7022 * 7023 * Only alignments up to those guaranteed by kmalloc() will be honored. Please see 7024 * Documentation/core-api/memory-allocation.rst for more details. 7025 * 7026 * Uses kmalloc to get the memory but if the allocation fails then falls back 7027 * to the vmalloc allocator. Use kvfree for freeing the memory. 7028 * 7029 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier. 
7030 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is 7031 * preferable to the vmalloc fallback, due to visible performance drawbacks. 7032 * 7033 * Return: pointer to the allocated memory of %NULL in case of failure 7034 */ 7035 void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align, 7036 gfp_t flags, int node) 7037 { 7038 void *ret; 7039 7040 /* 7041 * It doesn't really make sense to fallback to vmalloc for sub page 7042 * requests 7043 */ 7044 ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), 7045 kmalloc_gfp_adjust(flags, size), 7046 node, _RET_IP_); 7047 if (ret || size <= PAGE_SIZE) 7048 return ret; 7049 7050 /* non-sleeping allocations are not supported by vmalloc */ 7051 if (!gfpflags_allow_blocking(flags)) 7052 return NULL; 7053 7054 /* Don't even allow crazy sizes */ 7055 if (unlikely(size > INT_MAX)) { 7056 WARN_ON_ONCE(!(flags & __GFP_NOWARN)); 7057 return NULL; 7058 } 7059 7060 /* 7061 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP, 7062 * since the callers already cannot assume anything 7063 * about the resulting pointer, and cannot play 7064 * protection games. 7065 */ 7066 return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END, 7067 flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, 7068 node, __builtin_return_address(0)); 7069 } 7070 EXPORT_SYMBOL(__kvmalloc_node_noprof); 7071 7072 /** 7073 * kvfree() - Free memory. 7074 * @addr: Pointer to allocated memory. 7075 * 7076 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc(). 7077 * It is slightly more efficient to use kfree() or vfree() if you are certain 7078 * that you know which one to use. 7079 * 7080 * Context: Either preemptible task context or not-NMI interrupt. 7081 */ 7082 void kvfree(const void *addr) 7083 { 7084 if (is_vmalloc_addr(addr)) 7085 vfree(addr); 7086 else 7087 kfree(addr); 7088 } 7089 EXPORT_SYMBOL(kvfree); 7090 7091 /** 7092 * kvfree_sensitive - Free a data object containing sensitive information. 7093 * @addr: address of the data object to be freed. 7094 * @len: length of the data object. 7095 * 7096 * Use the special memzero_explicit() function to clear the content of a 7097 * kvmalloc'ed object containing sensitive data to make sure that the 7098 * compiler won't optimize out the data clearing. 7099 */ 7100 void kvfree_sensitive(const void *addr, size_t len) 7101 { 7102 if (likely(!ZERO_OR_NULL_PTR(addr))) { 7103 memzero_explicit((void *)addr, len); 7104 kvfree(addr); 7105 } 7106 } 7107 EXPORT_SYMBOL(kvfree_sensitive); 7108 7109 /** 7110 * kvrealloc_node_align - reallocate memory; contents remain unchanged 7111 * @p: object to reallocate memory for 7112 * @size: the size to reallocate 7113 * @align: desired alignment 7114 * @flags: the flags for the page level allocator 7115 * @nid: NUMA node id 7116 * 7117 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0 7118 * and @p is not a %NULL pointer, the object pointed to is freed. 7119 * 7120 * Only alignments up to those guaranteed by kmalloc() will be honored. Please see 7121 * Documentation/core-api/memory-allocation.rst for more details. 7122 * 7123 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the 7124 * initial memory allocation, every subsequent call to this API for the same 7125 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that 7126 * __GFP_ZERO is not fully honored by this API. 
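 *
 * A minimal sketch of that discipline (sizes and names are arbitrary):
 *
 *	buf = kvzalloc(len, GFP_KERNEL);
 *	...
 *	buf = kvrealloc(buf, new_len, GFP_KERNEL | __GFP_ZERO);
 *
 * Dropping __GFP_ZERO on any later call may leave newly exposed bytes
 * uninitialized.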
7127 * 7128 * In any case, the contents of the object pointed to are preserved up to the 7129 * lesser of the new and old sizes. 7130 * 7131 * This function must not be called concurrently with itself or kvfree() for the 7132 * same memory allocation. 7133 * 7134 * Return: pointer to the allocated memory or %NULL in case of error 7135 */ 7136 void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align, 7137 gfp_t flags, int nid) 7138 { 7139 void *n; 7140 7141 if (is_vmalloc_addr(p)) 7142 return vrealloc_node_align_noprof(p, size, align, flags, nid); 7143 7144 n = krealloc_node_align_noprof(p, size, align, kmalloc_gfp_adjust(flags, size), nid); 7145 if (!n) { 7146 /* We failed to krealloc(), fall back to kvmalloc(). */ 7147 n = kvmalloc_node_align_noprof(size, align, flags, nid); 7148 if (!n) 7149 return NULL; 7150 7151 if (p) { 7152 /* We already know that `p` is not a vmalloc address. */ 7153 kasan_disable_current(); 7154 memcpy(n, kasan_reset_tag(p), ksize(p)); 7155 kasan_enable_current(); 7156 7157 kfree(p); 7158 } 7159 } 7160 7161 return n; 7162 } 7163 EXPORT_SYMBOL(kvrealloc_node_align_noprof); 7164 7165 struct detached_freelist { 7166 struct slab *slab; 7167 void *tail; 7168 void *freelist; 7169 int cnt; 7170 struct kmem_cache *s; 7171 }; 7172 7173 /* 7174 * This function progressively scans the array with free objects (with 7175 * a limited look ahead) and extracts objects belonging to the same 7176 * slab. It builds a detached freelist directly within the given 7177 * slab/objects. This can happen without any need for 7178 * synchronization, because the objects are owned by the running process. 7179 * The freelist is built up as a single linked list in the objects. 7180 * The idea is that this detached freelist can then be bulk 7181 * transferred to the real freelist(s), while requiring only a single 7182 * synchronization primitive. Look ahead in the array is limited due 7183 * to performance reasons.
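 *
 * An illustrative walk-through (object names invented): for p[] = {A0, B0,
 * A1, A2}, where the A objects share a slab and B0 does not, one call links
 * A0 -> A1 -> A2 into a detached freelist (cnt == 3), swaps B0 to the start
 * of the array and returns the remaining size 1, so the next call handles B0.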
7184 */ 7185 static inline 7186 int build_detached_freelist(struct kmem_cache *s, size_t size, 7187 void **p, struct detached_freelist *df) 7188 { 7189 int lookahead = 3; 7190 void *object; 7191 struct folio *folio; 7192 size_t same; 7193 7194 object = p[--size]; 7195 folio = virt_to_folio(object); 7196 if (!s) { 7197 /* Handle kalloc'ed objects */ 7198 if (unlikely(!folio_test_slab(folio))) { 7199 free_large_kmalloc(folio, object); 7200 df->slab = NULL; 7201 return size; 7202 } 7203 /* Derive kmem_cache from object */ 7204 df->slab = folio_slab(folio); 7205 df->s = df->slab->slab_cache; 7206 } else { 7207 df->slab = folio_slab(folio); 7208 df->s = cache_from_obj(s, object); /* Support for memcg */ 7209 } 7210 7211 /* Start new detached freelist */ 7212 df->tail = object; 7213 df->freelist = object; 7214 df->cnt = 1; 7215 7216 if (is_kfence_address(object)) 7217 return size; 7218 7219 set_freepointer(df->s, object, NULL); 7220 7221 same = size; 7222 while (size) { 7223 object = p[--size]; 7224 /* df->slab is always set at this point */ 7225 if (df->slab == virt_to_slab(object)) { 7226 /* Opportunity build freelist */ 7227 set_freepointer(df->s, object, df->freelist); 7228 df->freelist = object; 7229 df->cnt++; 7230 same--; 7231 if (size != same) 7232 swap(p[size], p[same]); 7233 continue; 7234 } 7235 7236 /* Limit look ahead search */ 7237 if (!--lookahead) 7238 break; 7239 } 7240 7241 return same; 7242 } 7243 7244 /* 7245 * Internal bulk free of objects that were not initialised by the post alloc 7246 * hooks and thus should not be processed by the free hooks 7247 */ 7248 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 7249 { 7250 if (!size) 7251 return; 7252 7253 do { 7254 struct detached_freelist df; 7255 7256 size = build_detached_freelist(s, size, p, &df); 7257 if (!df.slab) 7258 continue; 7259 7260 if (kfence_free(df.freelist)) 7261 continue; 7262 7263 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, 7264 _RET_IP_); 7265 } while (likely(size)); 7266 } 7267 7268 /* Note that interrupts must be enabled when calling this function. */ 7269 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 7270 { 7271 if (!size) 7272 return; 7273 7274 /* 7275 * freeing to sheaves is so incompatible with the detached freelist so 7276 * once we go that way, we have to do everything differently 7277 */ 7278 if (s && s->cpu_sheaves) { 7279 free_to_pcs_bulk(s, size, p); 7280 return; 7281 } 7282 7283 do { 7284 struct detached_freelist df; 7285 7286 size = build_detached_freelist(s, size, p, &df); 7287 if (!df.slab) 7288 continue; 7289 7290 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size], 7291 df.cnt, _RET_IP_); 7292 } while (likely(size)); 7293 } 7294 EXPORT_SYMBOL(kmem_cache_free_bulk); 7295 7296 #ifndef CONFIG_SLUB_TINY 7297 static inline 7298 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 7299 void **p) 7300 { 7301 struct kmem_cache_cpu *c; 7302 unsigned long irqflags; 7303 int i; 7304 7305 /* 7306 * Drain objects in the per cpu slab, while disabling local 7307 * IRQs, which protects against PREEMPT and interrupts 7308 * handlers invoking normal fastpath. 
7309 */ 7310 c = slub_get_cpu_ptr(s->cpu_slab); 7311 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 7312 7313 for (i = 0; i < size; i++) { 7314 void *object = kfence_alloc(s, s->object_size, flags); 7315 7316 if (unlikely(object)) { 7317 p[i] = object; 7318 continue; 7319 } 7320 7321 object = c->freelist; 7322 if (unlikely(!object)) { 7323 /* 7324 * We may have removed an object from c->freelist using 7325 * the fastpath in the previous iteration; in that case, 7326 * c->tid has not been bumped yet. 7327 * Since ___slab_alloc() may reenable interrupts while 7328 * allocating memory, we should bump c->tid now. 7329 */ 7330 c->tid = next_tid(c->tid); 7331 7332 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 7333 7334 /* 7335 * Invoking slow path likely have side-effect 7336 * of re-populating per CPU c->freelist 7337 */ 7338 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 7339 _RET_IP_, c, s->object_size); 7340 if (unlikely(!p[i])) 7341 goto error; 7342 7343 c = this_cpu_ptr(s->cpu_slab); 7344 maybe_wipe_obj_freeptr(s, p[i]); 7345 7346 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 7347 7348 continue; /* goto for-loop */ 7349 } 7350 c->freelist = get_freepointer(s, object); 7351 p[i] = object; 7352 maybe_wipe_obj_freeptr(s, p[i]); 7353 stat(s, ALLOC_FASTPATH); 7354 } 7355 c->tid = next_tid(c->tid); 7356 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 7357 slub_put_cpu_ptr(s->cpu_slab); 7358 7359 return i; 7360 7361 error: 7362 slub_put_cpu_ptr(s->cpu_slab); 7363 __kmem_cache_free_bulk(s, i, p); 7364 return 0; 7365 7366 } 7367 #else /* CONFIG_SLUB_TINY */ 7368 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 7369 size_t size, void **p) 7370 { 7371 int i; 7372 7373 for (i = 0; i < size; i++) { 7374 void *object = kfence_alloc(s, s->object_size, flags); 7375 7376 if (unlikely(object)) { 7377 p[i] = object; 7378 continue; 7379 } 7380 7381 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, 7382 _RET_IP_, s->object_size); 7383 if (unlikely(!p[i])) 7384 goto error; 7385 7386 maybe_wipe_obj_freeptr(s, p[i]); 7387 } 7388 7389 return i; 7390 7391 error: 7392 __kmem_cache_free_bulk(s, i, p); 7393 return 0; 7394 } 7395 #endif /* CONFIG_SLUB_TINY */ 7396 7397 /* Note that interrupts must be enabled when calling this function. */ 7398 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, 7399 void **p) 7400 { 7401 unsigned int i = 0; 7402 7403 if (!size) 7404 return 0; 7405 7406 s = slab_pre_alloc_hook(s, flags); 7407 if (unlikely(!s)) 7408 return 0; 7409 7410 if (s->cpu_sheaves) 7411 i = alloc_from_pcs_bulk(s, size, p); 7412 7413 if (i < size) { 7414 /* 7415 * If we ran out of memory, don't bother with freeing back to 7416 * the percpu sheaves, we have bigger problems. 7417 */ 7418 if (unlikely(__kmem_cache_alloc_bulk(s, flags, size - i, p + i) == 0)) { 7419 if (i > 0) 7420 __kmem_cache_free_bulk(s, i, p); 7421 return 0; 7422 } 7423 } 7424 7425 /* 7426 * memcg and kmem_cache debug support and memory initialization. 7427 * Done outside of the IRQ disabled fastpath loop. 7428 */ 7429 if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p, 7430 slab_want_init_on_alloc(flags, s), s->object_size))) { 7431 return 0; 7432 } 7433 7434 return size; 7435 } 7436 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof); 7437 7438 /* 7439 * Object placement in a slab is made very easy because we always start at 7440 * offset 0. 
If we tune the size of the object to the alignment then we can 7441 * get the required alignment by putting one properly sized object after 7442 * another. 7443 * 7444 * Notice that the allocation order determines the sizes of the per cpu 7445 * caches. Each processor always has one slab available for allocations. 7446 * Increasing the allocation order reduces the number of times that slabs 7447 * must be moved on and off the partial lists and is therefore a factor in 7448 * locking overhead. 7449 */ 7450 7451 /* 7452 * Minimum / Maximum order of slab pages. This influences locking overhead 7453 * and slab fragmentation. A higher order reduces the number of partial slabs 7454 * and increases the number of allocations possible without having to 7455 * take the list_lock. 7456 */ 7457 static unsigned int slub_min_order; 7458 static unsigned int slub_max_order = 7459 IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER; 7460 static unsigned int slub_min_objects; 7461 7462 /* 7463 * Calculate the order of allocation given a slab object size. 7464 * 7465 * The order of allocation has a significant impact on performance and other 7466 * system components. Generally order 0 allocations should be preferred since 7467 * order 0 does not cause fragmentation in the page allocator. Larger objects 7468 * can be problematic to put into order 0 slabs because there may be too much 7469 * unused space left. We go to a higher order if more than 1/16th of the slab 7470 * would be wasted. 7471 * 7472 * In order to reach satisfactory performance we must ensure that a minimum 7473 * number of objects is in one slab. Otherwise we may generate too much 7474 * activity on the partial lists which requires taking the list_lock. This is 7475 * less of a concern for large slabs though, which are rarely used. 7476 * 7477 * slab_max_order specifies the order where we begin to stop considering the 7478 * number of objects in a slab as critical. If we reach slab_max_order then 7479 * we try to keep the page order as low as possible. So we accept more waste 7480 * of space in favor of a small page order. 7481 * 7482 * Higher order allocations also allow the placement of more objects in a 7483 * slab and thereby reduce object handling overhead. If the user has 7484 * requested a higher minimum order then we start with that one instead of 7485 * the smallest order which will fit the object. 7486 */ 7487 static inline unsigned int calc_slab_order(unsigned int size, 7488 unsigned int min_order, unsigned int max_order, 7489 unsigned int fract_leftover) 7490 { 7491 unsigned int order; 7492 7493 for (order = min_order; order <= max_order; order++) { 7494 7495 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 7496 unsigned int rem; 7497 7498 rem = slab_size % size; 7499 7500 if (rem <= slab_size / fract_leftover) 7501 break; 7502 } 7503 7504 return order; 7505 } 7506 7507 static inline int calculate_order(unsigned int size) 7508 { 7509 unsigned int order; 7510 unsigned int min_objects; 7511 unsigned int max_objects; 7512 unsigned int min_order; 7513 7514 min_objects = slub_min_objects; 7515 if (!min_objects) { 7516 /* 7517 * Some architectures will only update present cpus when 7518 * onlining them, so don't trust the number if it's just 1. But 7519 * we also don't want to use nr_cpu_ids always, as on some other 7520 * architectures, there can be many possible cpus, but never 7521 * onlined.
Here we compromise between trying to avoid too high 7522 * order on systems that appear larger than they are, and too 7523 * low order on systems that appear smaller than they are. 7524 */ 7525 unsigned int nr_cpus = num_present_cpus(); 7526 if (nr_cpus <= 1) 7527 nr_cpus = nr_cpu_ids; 7528 min_objects = 4 * (fls(nr_cpus) + 1); 7529 } 7530 /* min_objects can't be 0 because get_order(0) is undefined */ 7531 max_objects = max(order_objects(slub_max_order, size), 1U); 7532 min_objects = min(min_objects, max_objects); 7533 7534 min_order = max_t(unsigned int, slub_min_order, 7535 get_order(min_objects * size)); 7536 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 7537 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 7538 7539 /* 7540 * Attempt to find best configuration for a slab. This works by first 7541 * attempting to generate a layout with the best possible configuration 7542 * and backing off gradually. 7543 * 7544 * We start with accepting at most 1/16 waste and try to find the 7545 * smallest order from min_objects-derived/slab_min_order up to 7546 * slab_max_order that will satisfy the constraint. Note that increasing 7547 * the order can only result in same or less fractional waste, not more. 7548 * 7549 * If that fails, we increase the acceptable fraction of waste and try 7550 * again. The last iteration with fraction of 1/2 would effectively 7551 * accept any waste and give us the order determined by min_objects, as 7552 * long as at least single object fits within slab_max_order. 7553 */ 7554 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) { 7555 order = calc_slab_order(size, min_order, slub_max_order, 7556 fraction); 7557 if (order <= slub_max_order) 7558 return order; 7559 } 7560 7561 /* 7562 * Doh this slab cannot be placed using slab_max_order. 7563 */ 7564 order = get_order(size); 7565 if (order <= MAX_PAGE_ORDER) 7566 return order; 7567 return -ENOSYS; 7568 } 7569 7570 static void 7571 init_kmem_cache_node(struct kmem_cache_node *n, struct node_barn *barn) 7572 { 7573 n->nr_partial = 0; 7574 spin_lock_init(&n->list_lock); 7575 INIT_LIST_HEAD(&n->partial); 7576 #ifdef CONFIG_SLUB_DEBUG 7577 atomic_long_set(&n->nr_slabs, 0); 7578 atomic_long_set(&n->total_objects, 0); 7579 INIT_LIST_HEAD(&n->full); 7580 #endif 7581 n->barn = barn; 7582 if (barn) 7583 barn_init(barn); 7584 } 7585 7586 #ifndef CONFIG_SLUB_TINY 7587 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 7588 { 7589 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 7590 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * 7591 sizeof(struct kmem_cache_cpu)); 7592 7593 /* 7594 * Must align to double word boundary for the double cmpxchg 7595 * instructions to work; see __pcpu_double_call_return_bool(). 
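 * On 64-bit this is the 2 * sizeof(void *) == 16 byte alignment passed to
 * __alloc_percpu() below.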
7596 */ 7597 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 7598 2 * sizeof(void *)); 7599 7600 if (!s->cpu_slab) 7601 return 0; 7602 7603 init_kmem_cache_cpus(s); 7604 7605 return 1; 7606 } 7607 #else 7608 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 7609 { 7610 return 1; 7611 } 7612 #endif /* CONFIG_SLUB_TINY */ 7613 7614 static int init_percpu_sheaves(struct kmem_cache *s) 7615 { 7616 int cpu; 7617 7618 for_each_possible_cpu(cpu) { 7619 struct slub_percpu_sheaves *pcs; 7620 7621 pcs = per_cpu_ptr(s->cpu_sheaves, cpu); 7622 7623 local_trylock_init(&pcs->lock); 7624 7625 pcs->main = alloc_empty_sheaf(s, GFP_KERNEL); 7626 7627 if (!pcs->main) 7628 return -ENOMEM; 7629 } 7630 7631 return 0; 7632 } 7633 7634 static struct kmem_cache *kmem_cache_node; 7635 7636 /* 7637 * No kmalloc_node yet so do it by hand. We know that this is the first 7638 * slab on the node for this slabcache. There are no concurrent accesses 7639 * possible. 7640 * 7641 * Note that this function only works on the kmem_cache_node 7642 * when allocating for the kmem_cache_node. This is used for bootstrapping 7643 * memory on a fresh node that has no slab structures yet. 7644 */ 7645 static void early_kmem_cache_node_alloc(int node) 7646 { 7647 struct slab *slab; 7648 struct kmem_cache_node *n; 7649 7650 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 7651 7652 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 7653 7654 BUG_ON(!slab); 7655 if (slab_nid(slab) != node) { 7656 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 7657 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 7658 } 7659 7660 n = slab->freelist; 7661 BUG_ON(!n); 7662 #ifdef CONFIG_SLUB_DEBUG 7663 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 7664 #endif 7665 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 7666 slab->freelist = get_freepointer(kmem_cache_node, n); 7667 slab->inuse = 1; 7668 kmem_cache_node->node[node] = n; 7669 init_kmem_cache_node(n, NULL); 7670 inc_slabs_node(kmem_cache_node, node, slab->objects); 7671 7672 /* 7673 * No locks need to be taken here as it has just been 7674 * initialized and there is no concurrent access. 
7675 */ 7676 __add_partial(n, slab, DEACTIVATE_TO_HEAD); 7677 } 7678 7679 static void free_kmem_cache_nodes(struct kmem_cache *s) 7680 { 7681 int node; 7682 struct kmem_cache_node *n; 7683 7684 for_each_kmem_cache_node(s, node, n) { 7685 if (n->barn) { 7686 WARN_ON(n->barn->nr_full); 7687 WARN_ON(n->barn->nr_empty); 7688 kfree(n->barn); 7689 n->barn = NULL; 7690 } 7691 7692 s->node[node] = NULL; 7693 kmem_cache_free(kmem_cache_node, n); 7694 } 7695 } 7696 7697 void __kmem_cache_release(struct kmem_cache *s) 7698 { 7699 cache_random_seq_destroy(s); 7700 if (s->cpu_sheaves) 7701 pcs_destroy(s); 7702 #ifndef CONFIG_SLUB_TINY 7703 #ifdef CONFIG_PREEMPT_RT 7704 if (s->cpu_slab) 7705 lockdep_unregister_key(&s->lock_key); 7706 #endif 7707 free_percpu(s->cpu_slab); 7708 #endif 7709 free_kmem_cache_nodes(s); 7710 } 7711 7712 static int init_kmem_cache_nodes(struct kmem_cache *s) 7713 { 7714 int node; 7715 7716 for_each_node_mask(node, slab_nodes) { 7717 struct kmem_cache_node *n; 7718 struct node_barn *barn = NULL; 7719 7720 if (slab_state == DOWN) { 7721 early_kmem_cache_node_alloc(node); 7722 continue; 7723 } 7724 7725 if (s->cpu_sheaves) { 7726 barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node); 7727 7728 if (!barn) 7729 return 0; 7730 } 7731 7732 n = kmem_cache_alloc_node(kmem_cache_node, 7733 GFP_KERNEL, node); 7734 if (!n) { 7735 kfree(barn); 7736 return 0; 7737 } 7738 7739 init_kmem_cache_node(n, barn); 7740 7741 s->node[node] = n; 7742 } 7743 return 1; 7744 } 7745 7746 static void set_cpu_partial(struct kmem_cache *s) 7747 { 7748 #ifdef CONFIG_SLUB_CPU_PARTIAL 7749 unsigned int nr_objects; 7750 7751 /* 7752 * cpu_partial determined the maximum number of objects kept in the 7753 * per cpu partial lists of a processor. 7754 * 7755 * Per cpu partial lists mainly contain slabs that just have one 7756 * object freed. If they are used for allocation then they can be 7757 * filled up again with minimal effort. The slab will never hit the 7758 * per node partial lists and therefore no locking will be required. 7759 * 7760 * For backwards compatibility reasons, this is determined as number 7761 * of objects, even though we now limit maximum number of pages, see 7762 * slub_set_cpu_partial() 7763 */ 7764 if (!kmem_cache_has_cpu_partial(s)) 7765 nr_objects = 0; 7766 else if (s->size >= PAGE_SIZE) 7767 nr_objects = 6; 7768 else if (s->size >= 1024) 7769 nr_objects = 24; 7770 else if (s->size >= 256) 7771 nr_objects = 52; 7772 else 7773 nr_objects = 120; 7774 7775 slub_set_cpu_partial(s, nr_objects); 7776 #endif 7777 } 7778 7779 /* 7780 * calculate_sizes() determines the order and the distribution of data within 7781 * a slab object. 7782 */ 7783 static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s) 7784 { 7785 slab_flags_t flags = s->flags; 7786 unsigned int size = s->object_size; 7787 unsigned int order; 7788 7789 /* 7790 * Round up object size to the next word boundary. We can only 7791 * place the free pointer at word boundaries and this determines 7792 * the possible location of the free pointer. 7793 */ 7794 size = ALIGN(size, sizeof(void *)); 7795 7796 #ifdef CONFIG_SLUB_DEBUG 7797 /* 7798 * Determine if we can poison the object itself. If the user of 7799 * the slab may touch the object after free or before allocation 7800 * then we should never poison the object itself. 
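 * For example, SLAB_TYPESAFE_BY_RCU caches allow readers to dereference an
 * object after it has been freed, so such caches are excluded below, as are
 * caches with a constructor.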
7801 */ 7802 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 7803 !s->ctor) 7804 s->flags |= __OBJECT_POISON; 7805 else 7806 s->flags &= ~__OBJECT_POISON; 7807 7808 7809 /* 7810 * If we are Redzoning then check if there is some space between the 7811 * end of the object and the free pointer. If not then add an 7812 * additional word to have some bytes to store Redzone information. 7813 */ 7814 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 7815 size += sizeof(void *); 7816 #endif 7817 7818 /* 7819 * With that we have determined the number of bytes in actual use 7820 * by the object and redzoning. 7821 */ 7822 s->inuse = size; 7823 7824 if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) || 7825 (flags & SLAB_POISON) || s->ctor || 7826 ((flags & SLAB_RED_ZONE) && 7827 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) { 7828 /* 7829 * Relocate free pointer after the object if it is not 7830 * permitted to overwrite the first word of the object on 7831 * kmem_cache_free. 7832 * 7833 * This is the case if we do RCU, have a constructor or 7834 * destructor, are poisoning the objects, or are 7835 * redzoning an object smaller than sizeof(void *) or are 7836 * redzoning an object with slub_debug_orig_size() enabled, 7837 * in which case the right redzone may be extended. 7838 * 7839 * The assumption that s->offset >= s->inuse means free 7840 * pointer is outside of the object is used in the 7841 * freeptr_outside_object() function. If that is no 7842 * longer true, the function needs to be modified. 7843 */ 7844 s->offset = size; 7845 size += sizeof(void *); 7846 } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) { 7847 s->offset = args->freeptr_offset; 7848 } else { 7849 /* 7850 * Store freelist pointer near middle of object to keep 7851 * it away from the edges of the object to avoid small 7852 * sized over/underflows from neighboring allocations. 7853 */ 7854 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 7855 } 7856 7857 #ifdef CONFIG_SLUB_DEBUG 7858 if (flags & SLAB_STORE_USER) { 7859 /* 7860 * Need to store information about allocs and frees after 7861 * the object. 7862 */ 7863 size += 2 * sizeof(struct track); 7864 7865 /* Save the original kmalloc request size */ 7866 if (flags & SLAB_KMALLOC) 7867 size += sizeof(unsigned int); 7868 } 7869 #endif 7870 7871 kasan_cache_create(s, &size, &s->flags); 7872 #ifdef CONFIG_SLUB_DEBUG 7873 if (flags & SLAB_RED_ZONE) { 7874 /* 7875 * Add some empty padding so that we can catch 7876 * overwrites from earlier objects rather than let 7877 * tracking information or the free pointer be 7878 * corrupted if a user writes before the start 7879 * of the object. 7880 */ 7881 size += sizeof(void *); 7882 7883 s->red_left_pad = sizeof(void *); 7884 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 7885 size += s->red_left_pad; 7886 } 7887 #endif 7888 7889 /* 7890 * SLUB stores one object immediately after another beginning from 7891 * offset 0. In order to align the objects we have to simply size 7892 * each object to conform to the alignment. 
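 * For example, a 52 byte layout with s->align == 8 is rounded up to 56 bytes,
 * so objects at offsets 0, 56, 112, ... all start on an 8 byte boundary.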
7893 */ 7894 size = ALIGN(size, s->align); 7895 s->size = size; 7896 s->reciprocal_size = reciprocal_value(size); 7897 order = calculate_order(size); 7898 7899 if ((int)order < 0) 7900 return 0; 7901 7902 s->allocflags = __GFP_COMP; 7903 7904 if (s->flags & SLAB_CACHE_DMA) 7905 s->allocflags |= GFP_DMA; 7906 7907 if (s->flags & SLAB_CACHE_DMA32) 7908 s->allocflags |= GFP_DMA32; 7909 7910 if (s->flags & SLAB_RECLAIM_ACCOUNT) 7911 s->allocflags |= __GFP_RECLAIMABLE; 7912 7913 /* 7914 * Determine the number of objects per slab 7915 */ 7916 s->oo = oo_make(order, size); 7917 s->min = oo_make(get_order(size), size); 7918 7919 return !!oo_objects(s->oo); 7920 } 7921 7922 static void list_slab_objects(struct kmem_cache *s, struct slab *slab) 7923 { 7924 #ifdef CONFIG_SLUB_DEBUG 7925 void *addr = slab_address(slab); 7926 void *p; 7927 7928 if (!slab_add_kunit_errors()) 7929 slab_bug(s, "Objects remaining on __kmem_cache_shutdown()"); 7930 7931 spin_lock(&object_map_lock); 7932 __fill_map(object_map, s, slab); 7933 7934 for_each_object(p, s, addr, slab->objects) { 7935 7936 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { 7937 if (slab_add_kunit_errors()) 7938 continue; 7939 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 7940 print_tracking(s, p); 7941 } 7942 } 7943 spin_unlock(&object_map_lock); 7944 7945 __slab_err(slab); 7946 #endif 7947 } 7948 7949 /* 7950 * Attempt to free all partial slabs on a node. 7951 * This is called from __kmem_cache_shutdown(). We must take list_lock 7952 * because sysfs file might still access partial list after the shutdowning. 7953 */ 7954 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 7955 { 7956 LIST_HEAD(discard); 7957 struct slab *slab, *h; 7958 7959 BUG_ON(irqs_disabled()); 7960 spin_lock_irq(&n->list_lock); 7961 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 7962 if (!slab->inuse) { 7963 remove_partial(n, slab); 7964 list_add(&slab->slab_list, &discard); 7965 } else { 7966 list_slab_objects(s, slab); 7967 } 7968 } 7969 spin_unlock_irq(&n->list_lock); 7970 7971 list_for_each_entry_safe(slab, h, &discard, slab_list) 7972 discard_slab(s, slab); 7973 } 7974 7975 bool __kmem_cache_empty(struct kmem_cache *s) 7976 { 7977 int node; 7978 struct kmem_cache_node *n; 7979 7980 for_each_kmem_cache_node(s, node, n) 7981 if (n->nr_partial || node_nr_slabs(n)) 7982 return false; 7983 return true; 7984 } 7985 7986 /* 7987 * Release all resources used by a slab cache. 
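 * Returns 0 on success, or 1 if some slabs could not be freed because
 * objects in them are still in use.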
7988 */ 7989 int __kmem_cache_shutdown(struct kmem_cache *s) 7990 { 7991 int node; 7992 struct kmem_cache_node *n; 7993 7994 flush_all_cpus_locked(s); 7995 7996 /* we might have rcu sheaves in flight */ 7997 if (s->cpu_sheaves) 7998 rcu_barrier(); 7999 8000 /* Attempt to free all objects */ 8001 for_each_kmem_cache_node(s, node, n) { 8002 if (n->barn) 8003 barn_shrink(s, n->barn); 8004 free_partial(s, n); 8005 if (n->nr_partial || node_nr_slabs(n)) 8006 return 1; 8007 } 8008 return 0; 8009 } 8010 8011 #ifdef CONFIG_PRINTK 8012 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 8013 { 8014 void *base; 8015 int __maybe_unused i; 8016 unsigned int objnr; 8017 void *objp; 8018 void *objp0; 8019 struct kmem_cache *s = slab->slab_cache; 8020 struct track __maybe_unused *trackp; 8021 8022 kpp->kp_ptr = object; 8023 kpp->kp_slab = slab; 8024 kpp->kp_slab_cache = s; 8025 base = slab_address(slab); 8026 objp0 = kasan_reset_tag(object); 8027 #ifdef CONFIG_SLUB_DEBUG 8028 objp = restore_red_left(s, objp0); 8029 #else 8030 objp = objp0; 8031 #endif 8032 objnr = obj_to_index(s, slab, objp); 8033 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 8034 objp = base + s->size * objnr; 8035 kpp->kp_objp = objp; 8036 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 8037 || (objp - base) % s->size) || 8038 !(s->flags & SLAB_STORE_USER)) 8039 return; 8040 #ifdef CONFIG_SLUB_DEBUG 8041 objp = fixup_red_left(s, objp); 8042 trackp = get_track(s, objp, TRACK_ALLOC); 8043 kpp->kp_ret = (void *)trackp->addr; 8044 #ifdef CONFIG_STACKDEPOT 8045 { 8046 depot_stack_handle_t handle; 8047 unsigned long *entries; 8048 unsigned int nr_entries; 8049 8050 handle = READ_ONCE(trackp->handle); 8051 if (handle) { 8052 nr_entries = stack_depot_fetch(handle, &entries); 8053 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 8054 kpp->kp_stack[i] = (void *)entries[i]; 8055 } 8056 8057 trackp = get_track(s, objp, TRACK_FREE); 8058 handle = READ_ONCE(trackp->handle); 8059 if (handle) { 8060 nr_entries = stack_depot_fetch(handle, &entries); 8061 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 8062 kpp->kp_free_stack[i] = (void *)entries[i]; 8063 } 8064 } 8065 #endif 8066 #endif 8067 } 8068 #endif 8069 8070 /******************************************************************** 8071 * Kmalloc subsystem 8072 *******************************************************************/ 8073 8074 static int __init setup_slub_min_order(char *str) 8075 { 8076 get_option(&str, (int *)&slub_min_order); 8077 8078 if (slub_min_order > slub_max_order) 8079 slub_max_order = slub_min_order; 8080 8081 return 1; 8082 } 8083 8084 __setup("slab_min_order=", setup_slub_min_order); 8085 __setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0); 8086 8087 8088 static int __init setup_slub_max_order(char *str) 8089 { 8090 get_option(&str, (int *)&slub_max_order); 8091 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER); 8092 8093 if (slub_min_order > slub_max_order) 8094 slub_min_order = slub_max_order; 8095 8096 return 1; 8097 } 8098 8099 __setup("slab_max_order=", setup_slub_max_order); 8100 __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0); 8101 8102 static int __init setup_slub_min_objects(char *str) 8103 { 8104 get_option(&str, (int *)&slub_min_objects); 8105 8106 return 1; 8107 } 8108 8109 __setup("slab_min_objects=", setup_slub_min_objects); 8110 __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0); 
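/*
 * Example (illustrative): booting with
 *
 *	slab_min_order=1 slab_max_order=3 slab_min_objects=16
 *
 * requests at least order-1 slab pages, caps slab pages at order 3
 * (never above MAX_PAGE_ORDER), and asks the order calculation to aim
 * for at least 16 objects per slab where possible. Each handler above
 * also adjusts the other order so that slub_min_order never exceeds
 * slub_max_order; both the "slab_"- and "slub_"-prefixed parameter
 * names are accepted.
 */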
8111 8112 #ifdef CONFIG_NUMA 8113 static int __init setup_slab_strict_numa(char *str) 8114 { 8115 if (nr_node_ids > 1) { 8116 static_branch_enable(&strict_numa); 8117 pr_info("SLUB: Strict NUMA enabled.\n"); 8118 } else { 8119 pr_warn("slab_strict_numa parameter set on non NUMA system.\n"); 8120 } 8121 8122 return 1; 8123 } 8124 8125 __setup("slab_strict_numa", setup_slab_strict_numa); 8126 #endif 8127 8128 8129 #ifdef CONFIG_HARDENED_USERCOPY 8130 /* 8131 * Rejects incorrectly sized objects and objects that are to be copied 8132 * to/from userspace but do not fall entirely within the containing slab 8133 * cache's usercopy region. 8134 * 8135 * Returns NULL if check passes, otherwise const char * to name of cache 8136 * to indicate an error. 8137 */ 8138 void __check_heap_object(const void *ptr, unsigned long n, 8139 const struct slab *slab, bool to_user) 8140 { 8141 struct kmem_cache *s; 8142 unsigned int offset; 8143 bool is_kfence = is_kfence_address(ptr); 8144 8145 ptr = kasan_reset_tag(ptr); 8146 8147 /* Find object and usable object size. */ 8148 s = slab->slab_cache; 8149 8150 /* Reject impossible pointers. */ 8151 if (ptr < slab_address(slab)) 8152 usercopy_abort("SLUB object not in SLUB page?!", NULL, 8153 to_user, 0, n); 8154 8155 /* Find offset within object. */ 8156 if (is_kfence) 8157 offset = ptr - kfence_object_start(ptr); 8158 else 8159 offset = (ptr - slab_address(slab)) % s->size; 8160 8161 /* Adjust for redzone and reject if within the redzone. */ 8162 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 8163 if (offset < s->red_left_pad) 8164 usercopy_abort("SLUB object in left red zone", 8165 s->name, to_user, offset, n); 8166 offset -= s->red_left_pad; 8167 } 8168 8169 /* Allow address range falling entirely within usercopy region. */ 8170 if (offset >= s->useroffset && 8171 offset - s->useroffset <= s->usersize && 8172 n <= s->useroffset - offset + s->usersize) 8173 return; 8174 8175 usercopy_abort("SLUB object", s->name, to_user, offset, n); 8176 } 8177 #endif /* CONFIG_HARDENED_USERCOPY */ 8178 8179 #define SHRINK_PROMOTE_MAX 32 8180 8181 /* 8182 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 8183 * up most to the head of the partial lists. New allocations will then 8184 * fill those up and thus they can be removed from the partial lists. 8185 * 8186 * The slabs with the least items are placed last. This results in them 8187 * being allocated from last increasing the chance that the last objects 8188 * are freed in them. 8189 */ 8190 static int __kmem_cache_do_shrink(struct kmem_cache *s) 8191 { 8192 int node; 8193 int i; 8194 struct kmem_cache_node *n; 8195 struct slab *slab; 8196 struct slab *t; 8197 struct list_head discard; 8198 struct list_head promote[SHRINK_PROMOTE_MAX]; 8199 unsigned long flags; 8200 int ret = 0; 8201 8202 for_each_kmem_cache_node(s, node, n) { 8203 INIT_LIST_HEAD(&discard); 8204 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 8205 INIT_LIST_HEAD(promote + i); 8206 8207 if (n->barn) 8208 barn_shrink(s, n->barn); 8209 8210 spin_lock_irqsave(&n->list_lock, flags); 8211 8212 /* 8213 * Build lists of slabs to discard or promote. 8214 * 8215 * Note that concurrent frees may occur while we hold the 8216 * list_lock. slab->inuse here is the upper limit. 
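 *
 * For example (illustrative): with 32 objects per slab, a slab with
 * inuse == 0 is moved to the discard list and freed below, while a
 * slab with 30 objects in use (free == 2) is queued on promote[1] and
 * ends up near the head of the partial list, just behind slabs that
 * have only a single free object.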
8217 */ 8218 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 8219 int free = slab->objects - slab->inuse; 8220 8221 /* Do not reread slab->inuse */ 8222 barrier(); 8223 8224 /* We do not keep full slabs on the list */ 8225 BUG_ON(free <= 0); 8226 8227 if (free == slab->objects) { 8228 list_move(&slab->slab_list, &discard); 8229 slab_clear_node_partial(slab); 8230 n->nr_partial--; 8231 dec_slabs_node(s, node, slab->objects); 8232 } else if (free <= SHRINK_PROMOTE_MAX) 8233 list_move(&slab->slab_list, promote + free - 1); 8234 } 8235 8236 /* 8237 * Promote the slabs filled up most to the head of the 8238 * partial list. 8239 */ 8240 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 8241 list_splice(promote + i, &n->partial); 8242 8243 spin_unlock_irqrestore(&n->list_lock, flags); 8244 8245 /* Release empty slabs */ 8246 list_for_each_entry_safe(slab, t, &discard, slab_list) 8247 free_slab(s, slab); 8248 8249 if (node_nr_slabs(n)) 8250 ret = 1; 8251 } 8252 8253 return ret; 8254 } 8255 8256 int __kmem_cache_shrink(struct kmem_cache *s) 8257 { 8258 flush_all(s); 8259 return __kmem_cache_do_shrink(s); 8260 } 8261 8262 static int slab_mem_going_offline_callback(void) 8263 { 8264 struct kmem_cache *s; 8265 8266 mutex_lock(&slab_mutex); 8267 list_for_each_entry(s, &slab_caches, list) { 8268 flush_all_cpus_locked(s); 8269 __kmem_cache_do_shrink(s); 8270 } 8271 mutex_unlock(&slab_mutex); 8272 8273 return 0; 8274 } 8275 8276 static int slab_mem_going_online_callback(int nid) 8277 { 8278 struct kmem_cache_node *n; 8279 struct kmem_cache *s; 8280 int ret = 0; 8281 8282 /* 8283 * We are bringing a node online. No memory is available yet. We must 8284 * allocate a kmem_cache_node structure in order to bring the node 8285 * online. 8286 */ 8287 mutex_lock(&slab_mutex); 8288 list_for_each_entry(s, &slab_caches, list) { 8289 struct node_barn *barn = NULL; 8290 8291 /* 8292 * The structure may already exist if the node was previously 8293 * onlined and offlined. 8294 */ 8295 if (get_node(s, nid)) 8296 continue; 8297 8298 if (s->cpu_sheaves) { 8299 barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, nid); 8300 8301 if (!barn) { 8302 ret = -ENOMEM; 8303 goto out; 8304 } 8305 } 8306 8307 /* 8308 * XXX: kmem_cache_alloc_node will fallback to other nodes 8309 * since memory is not yet available from the node that 8310 * is brought up. 8311 */ 8312 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 8313 if (!n) { 8314 kfree(barn); 8315 ret = -ENOMEM; 8316 goto out; 8317 } 8318 8319 init_kmem_cache_node(n, barn); 8320 8321 s->node[nid] = n; 8322 } 8323 /* 8324 * Any cache created after this point will also have kmem_cache_node 8325 * initialized for the new node. 
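 * This works because init_kmem_cache_nodes() allocates a
 * kmem_cache_node for every node set in the slab_nodes mask, and nid
 * is added to that mask just below.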
8326 */ 8327 node_set(nid, slab_nodes); 8328 out: 8329 mutex_unlock(&slab_mutex); 8330 return ret; 8331 } 8332 8333 static int slab_memory_callback(struct notifier_block *self, 8334 unsigned long action, void *arg) 8335 { 8336 struct node_notify *nn = arg; 8337 int nid = nn->nid; 8338 int ret = 0; 8339 8340 switch (action) { 8341 case NODE_ADDING_FIRST_MEMORY: 8342 ret = slab_mem_going_online_callback(nid); 8343 break; 8344 case NODE_REMOVING_LAST_MEMORY: 8345 ret = slab_mem_going_offline_callback(); 8346 break; 8347 } 8348 if (ret) 8349 ret = notifier_from_errno(ret); 8350 else 8351 ret = NOTIFY_OK; 8352 return ret; 8353 } 8354 8355 /******************************************************************** 8356 * Basic setup of slabs 8357 *******************************************************************/ 8358 8359 /* 8360 * Used for early kmem_cache structures that were allocated using 8361 * the page allocator. Allocate them properly then fix up the pointers 8362 * that may be pointing to the wrong kmem_cache structure. 8363 */ 8364 8365 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 8366 { 8367 int node; 8368 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 8369 struct kmem_cache_node *n; 8370 8371 memcpy(s, static_cache, kmem_cache->object_size); 8372 8373 /* 8374 * This runs very early, and only the boot processor is supposed to be 8375 * up. Even if it weren't true, IRQs are not up so we couldn't fire 8376 * IPIs around. 8377 */ 8378 __flush_cpu_slab(s, smp_processor_id()); 8379 for_each_kmem_cache_node(s, node, n) { 8380 struct slab *p; 8381 8382 list_for_each_entry(p, &n->partial, slab_list) 8383 p->slab_cache = s; 8384 8385 #ifdef CONFIG_SLUB_DEBUG 8386 list_for_each_entry(p, &n->full, slab_list) 8387 p->slab_cache = s; 8388 #endif 8389 } 8390 list_add(&s->list, &slab_caches); 8391 return s; 8392 } 8393 8394 void __init kmem_cache_init(void) 8395 { 8396 static __initdata struct kmem_cache boot_kmem_cache, 8397 boot_kmem_cache_node; 8398 int node; 8399 8400 if (debug_guardpage_minorder()) 8401 slub_max_order = 0; 8402 8403 /* Inform pointer hashing choice about slub debugging state. */ 8404 hash_pointers_finalize(__slub_debug_enabled()); 8405 8406 kmem_cache_node = &boot_kmem_cache_node; 8407 kmem_cache = &boot_kmem_cache; 8408 8409 /* 8410 * Initialize the nodemask for which we will allocate per node 8411 * structures. Here we don't need taking slab_mutex yet. 
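 * (We are running early on the boot CPU and the memory hotplug
 * notifier is only registered further below.)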
8412 */ 8413 for_each_node_state(node, N_MEMORY) 8414 node_set(node, slab_nodes); 8415 8416 create_boot_cache(kmem_cache_node, "kmem_cache_node", 8417 sizeof(struct kmem_cache_node), 8418 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 8419 8420 hotplug_node_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 8421 8422 /* Able to allocate the per node structures */ 8423 slab_state = PARTIAL; 8424 8425 create_boot_cache(kmem_cache, "kmem_cache", 8426 offsetof(struct kmem_cache, node) + 8427 nr_node_ids * sizeof(struct kmem_cache_node *), 8428 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 8429 8430 kmem_cache = bootstrap(&boot_kmem_cache); 8431 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 8432 8433 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 8434 setup_kmalloc_cache_index_table(); 8435 create_kmalloc_caches(); 8436 8437 /* Setup random freelists for each cache */ 8438 init_freelist_randomization(); 8439 8440 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 8441 slub_cpu_dead); 8442 8443 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 8444 cache_line_size(), 8445 slub_min_order, slub_max_order, slub_min_objects, 8446 nr_cpu_ids, nr_node_ids); 8447 } 8448 8449 void __init kmem_cache_init_late(void) 8450 { 8451 #ifndef CONFIG_SLUB_TINY 8452 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); 8453 WARN_ON(!flushwq); 8454 #endif 8455 } 8456 8457 struct kmem_cache * 8458 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 8459 slab_flags_t flags, void (*ctor)(void *)) 8460 { 8461 struct kmem_cache *s; 8462 8463 s = find_mergeable(size, align, flags, name, ctor); 8464 if (s) { 8465 if (sysfs_slab_alias(s, name)) 8466 pr_err("SLUB: Unable to add cache alias %s to sysfs\n", 8467 name); 8468 8469 s->refcount++; 8470 8471 /* 8472 * Adjust the object sizes so that we clear 8473 * the complete object on kzalloc. 8474 */ 8475 s->object_size = max(s->object_size, size); 8476 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 8477 } 8478 8479 return s; 8480 } 8481 8482 int do_kmem_cache_create(struct kmem_cache *s, const char *name, 8483 unsigned int size, struct kmem_cache_args *args, 8484 slab_flags_t flags) 8485 { 8486 int err = -EINVAL; 8487 8488 s->name = name; 8489 s->size = s->object_size = size; 8490 8491 s->flags = kmem_cache_flags(flags, s->name); 8492 #ifdef CONFIG_SLAB_FREELIST_HARDENED 8493 s->random = get_random_long(); 8494 #endif 8495 s->align = args->align; 8496 s->ctor = args->ctor; 8497 #ifdef CONFIG_HARDENED_USERCOPY 8498 s->useroffset = args->useroffset; 8499 s->usersize = args->usersize; 8500 #endif 8501 8502 if (!calculate_sizes(args, s)) 8503 goto out; 8504 if (disable_higher_order_debug) { 8505 /* 8506 * Disable debugging flags that store metadata if the min slab 8507 * order increased. 8508 */ 8509 if (get_order(s->size) > get_order(s->object_size)) { 8510 s->flags &= ~DEBUG_METADATA_FLAGS; 8511 s->offset = 0; 8512 if (!calculate_sizes(args, s)) 8513 goto out; 8514 } 8515 } 8516 8517 #ifdef system_has_freelist_aba 8518 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { 8519 /* Enable fast mode */ 8520 s->flags |= __CMPXCHG_DOUBLE; 8521 } 8522 #endif 8523 8524 /* 8525 * The larger the object size is, the more slabs we want on the partial 8526 * list to avoid pounding the page allocator excessively. 
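 *
 * For example (illustrative): s->size == 4096 gives
 * ilog2(4096) / 2 == 6, while s->size == 64 gives 3; both results are
 * then clamped into the [MIN_PARTIAL, MAX_PARTIAL] range below.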
8527 */ 8528 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 8529 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 8530 8531 set_cpu_partial(s); 8532 8533 if (args->sheaf_capacity && !IS_ENABLED(CONFIG_SLUB_TINY) 8534 && !(s->flags & SLAB_DEBUG_FLAGS)) { 8535 s->cpu_sheaves = alloc_percpu(struct slub_percpu_sheaves); 8536 if (!s->cpu_sheaves) { 8537 err = -ENOMEM; 8538 goto out; 8539 } 8540 // TODO: increase capacity to grow slab_sheaf up to next kmalloc size? 8541 s->sheaf_capacity = args->sheaf_capacity; 8542 } 8543 8544 #ifdef CONFIG_NUMA 8545 s->remote_node_defrag_ratio = 1000; 8546 #endif 8547 8548 /* Initialize the pre-computed randomized freelist if slab is up */ 8549 if (slab_state >= UP) { 8550 if (init_cache_random_seq(s)) 8551 goto out; 8552 } 8553 8554 if (!init_kmem_cache_nodes(s)) 8555 goto out; 8556 8557 if (!alloc_kmem_cache_cpus(s)) 8558 goto out; 8559 8560 if (s->cpu_sheaves) { 8561 err = init_percpu_sheaves(s); 8562 if (err) 8563 goto out; 8564 } 8565 8566 err = 0; 8567 8568 /* Mutex is not taken during early boot */ 8569 if (slab_state <= UP) 8570 goto out; 8571 8572 /* 8573 * Failing to create sysfs files is not critical to SLUB functionality. 8574 * If it fails, proceed with cache creation without these files. 8575 */ 8576 if (sysfs_slab_add(s)) 8577 pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name); 8578 8579 if (s->flags & SLAB_STORE_USER) 8580 debugfs_slab_add(s); 8581 8582 out: 8583 if (err) 8584 __kmem_cache_release(s); 8585 return err; 8586 } 8587 8588 #ifdef SLAB_SUPPORTS_SYSFS 8589 static int count_inuse(struct slab *slab) 8590 { 8591 return slab->inuse; 8592 } 8593 8594 static int count_total(struct slab *slab) 8595 { 8596 return slab->objects; 8597 } 8598 #endif 8599 8600 #ifdef CONFIG_SLUB_DEBUG 8601 static void validate_slab(struct kmem_cache *s, struct slab *slab, 8602 unsigned long *obj_map) 8603 { 8604 void *p; 8605 void *addr = slab_address(slab); 8606 8607 if (!validate_slab_ptr(slab)) { 8608 slab_err(s, slab, "Not a valid slab page"); 8609 return; 8610 } 8611 8612 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 8613 return; 8614 8615 /* Now we know that a valid freelist exists */ 8616 __fill_map(obj_map, s, slab); 8617 for_each_object(p, s, addr, slab->objects) { 8618 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 
8619 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 8620 8621 if (!check_object(s, slab, p, val)) 8622 break; 8623 } 8624 } 8625 8626 static int validate_slab_node(struct kmem_cache *s, 8627 struct kmem_cache_node *n, unsigned long *obj_map) 8628 { 8629 unsigned long count = 0; 8630 struct slab *slab; 8631 unsigned long flags; 8632 8633 spin_lock_irqsave(&n->list_lock, flags); 8634 8635 list_for_each_entry(slab, &n->partial, slab_list) { 8636 validate_slab(s, slab, obj_map); 8637 count++; 8638 } 8639 if (count != n->nr_partial) { 8640 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 8641 s->name, count, n->nr_partial); 8642 slab_add_kunit_errors(); 8643 } 8644 8645 if (!(s->flags & SLAB_STORE_USER)) 8646 goto out; 8647 8648 list_for_each_entry(slab, &n->full, slab_list) { 8649 validate_slab(s, slab, obj_map); 8650 count++; 8651 } 8652 if (count != node_nr_slabs(n)) { 8653 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 8654 s->name, count, node_nr_slabs(n)); 8655 slab_add_kunit_errors(); 8656 } 8657 8658 out: 8659 spin_unlock_irqrestore(&n->list_lock, flags); 8660 return count; 8661 } 8662 8663 long validate_slab_cache(struct kmem_cache *s) 8664 { 8665 int node; 8666 unsigned long count = 0; 8667 struct kmem_cache_node *n; 8668 unsigned long *obj_map; 8669 8670 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 8671 if (!obj_map) 8672 return -ENOMEM; 8673 8674 flush_all(s); 8675 for_each_kmem_cache_node(s, node, n) 8676 count += validate_slab_node(s, n, obj_map); 8677 8678 bitmap_free(obj_map); 8679 8680 return count; 8681 } 8682 EXPORT_SYMBOL(validate_slab_cache); 8683 8684 #ifdef CONFIG_DEBUG_FS 8685 /* 8686 * Generate lists of code addresses where slabcache objects are allocated 8687 * and freed. 8688 */ 8689 8690 struct location { 8691 depot_stack_handle_t handle; 8692 unsigned long count; 8693 unsigned long addr; 8694 unsigned long waste; 8695 long long sum_time; 8696 long min_time; 8697 long max_time; 8698 long min_pid; 8699 long max_pid; 8700 DECLARE_BITMAP(cpus, NR_CPUS); 8701 nodemask_t nodes; 8702 }; 8703 8704 struct loc_track { 8705 unsigned long max; 8706 unsigned long count; 8707 struct location *loc; 8708 loff_t idx; 8709 }; 8710 8711 static struct dentry *slab_debugfs_root; 8712 8713 static void free_loc_track(struct loc_track *t) 8714 { 8715 if (t->max) 8716 free_pages((unsigned long)t->loc, 8717 get_order(sizeof(struct location) * t->max)); 8718 } 8719 8720 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 8721 { 8722 struct location *l; 8723 int order; 8724 8725 order = get_order(sizeof(struct location) * max); 8726 8727 l = (void *)__get_free_pages(flags, order); 8728 if (!l) 8729 return 0; 8730 8731 if (t->count) { 8732 memcpy(l, t->loc, sizeof(struct location) * t->count); 8733 free_loc_track(t); 8734 } 8735 t->max = max; 8736 t->loc = l; 8737 return 1; 8738 } 8739 8740 static int add_location(struct loc_track *t, struct kmem_cache *s, 8741 const struct track *track, 8742 unsigned int orig_size) 8743 { 8744 long start, end, pos; 8745 struct location *l; 8746 unsigned long caddr, chandle, cwaste; 8747 unsigned long age = jiffies - track->when; 8748 depot_stack_handle_t handle = 0; 8749 unsigned int waste = s->object_size - orig_size; 8750 8751 #ifdef CONFIG_STACKDEPOT 8752 handle = READ_ONCE(track->handle); 8753 #endif 8754 start = -1; 8755 end = t->count; 8756 8757 for ( ; ; ) { 8758 pos = start + (end - start + 1) / 2; 8759 8760 /* 8761 * There is nothing at "end". 
If we end up there 8762 * we need to add something to before end. 8763 */ 8764 if (pos == end) 8765 break; 8766 8767 l = &t->loc[pos]; 8768 caddr = l->addr; 8769 chandle = l->handle; 8770 cwaste = l->waste; 8771 if ((track->addr == caddr) && (handle == chandle) && 8772 (waste == cwaste)) { 8773 8774 l->count++; 8775 if (track->when) { 8776 l->sum_time += age; 8777 if (age < l->min_time) 8778 l->min_time = age; 8779 if (age > l->max_time) 8780 l->max_time = age; 8781 8782 if (track->pid < l->min_pid) 8783 l->min_pid = track->pid; 8784 if (track->pid > l->max_pid) 8785 l->max_pid = track->pid; 8786 8787 cpumask_set_cpu(track->cpu, 8788 to_cpumask(l->cpus)); 8789 } 8790 node_set(page_to_nid(virt_to_page(track)), l->nodes); 8791 return 1; 8792 } 8793 8794 if (track->addr < caddr) 8795 end = pos; 8796 else if (track->addr == caddr && handle < chandle) 8797 end = pos; 8798 else if (track->addr == caddr && handle == chandle && 8799 waste < cwaste) 8800 end = pos; 8801 else 8802 start = pos; 8803 } 8804 8805 /* 8806 * Not found. Insert new tracking element. 8807 */ 8808 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 8809 return 0; 8810 8811 l = t->loc + pos; 8812 if (pos < t->count) 8813 memmove(l + 1, l, 8814 (t->count - pos) * sizeof(struct location)); 8815 t->count++; 8816 l->count = 1; 8817 l->addr = track->addr; 8818 l->sum_time = age; 8819 l->min_time = age; 8820 l->max_time = age; 8821 l->min_pid = track->pid; 8822 l->max_pid = track->pid; 8823 l->handle = handle; 8824 l->waste = waste; 8825 cpumask_clear(to_cpumask(l->cpus)); 8826 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 8827 nodes_clear(l->nodes); 8828 node_set(page_to_nid(virt_to_page(track)), l->nodes); 8829 return 1; 8830 } 8831 8832 static void process_slab(struct loc_track *t, struct kmem_cache *s, 8833 struct slab *slab, enum track_item alloc, 8834 unsigned long *obj_map) 8835 { 8836 void *addr = slab_address(slab); 8837 bool is_alloc = (alloc == TRACK_ALLOC); 8838 void *p; 8839 8840 __fill_map(obj_map, s, slab); 8841 8842 for_each_object(p, s, addr, slab->objects) 8843 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 8844 add_location(t, s, get_track(s, p, alloc), 8845 is_alloc ? 
get_orig_size(s, p) : 8846 s->object_size); 8847 } 8848 #endif /* CONFIG_DEBUG_FS */ 8849 #endif /* CONFIG_SLUB_DEBUG */ 8850 8851 #ifdef SLAB_SUPPORTS_SYSFS 8852 enum slab_stat_type { 8853 SL_ALL, /* All slabs */ 8854 SL_PARTIAL, /* Only partially allocated slabs */ 8855 SL_CPU, /* Only slabs used for cpu caches */ 8856 SL_OBJECTS, /* Determine allocated objects not slabs */ 8857 SL_TOTAL /* Determine object capacity not slabs */ 8858 }; 8859 8860 #define SO_ALL (1 << SL_ALL) 8861 #define SO_PARTIAL (1 << SL_PARTIAL) 8862 #define SO_CPU (1 << SL_CPU) 8863 #define SO_OBJECTS (1 << SL_OBJECTS) 8864 #define SO_TOTAL (1 << SL_TOTAL) 8865 8866 static ssize_t show_slab_objects(struct kmem_cache *s, 8867 char *buf, unsigned long flags) 8868 { 8869 unsigned long total = 0; 8870 int node; 8871 int x; 8872 unsigned long *nodes; 8873 int len = 0; 8874 8875 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 8876 if (!nodes) 8877 return -ENOMEM; 8878 8879 if (flags & SO_CPU) { 8880 int cpu; 8881 8882 for_each_possible_cpu(cpu) { 8883 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 8884 cpu); 8885 int node; 8886 struct slab *slab; 8887 8888 slab = READ_ONCE(c->slab); 8889 if (!slab) 8890 continue; 8891 8892 node = slab_nid(slab); 8893 if (flags & SO_TOTAL) 8894 x = slab->objects; 8895 else if (flags & SO_OBJECTS) 8896 x = slab->inuse; 8897 else 8898 x = 1; 8899 8900 total += x; 8901 nodes[node] += x; 8902 8903 #ifdef CONFIG_SLUB_CPU_PARTIAL 8904 slab = slub_percpu_partial_read_once(c); 8905 if (slab) { 8906 node = slab_nid(slab); 8907 if (flags & SO_TOTAL) 8908 WARN_ON_ONCE(1); 8909 else if (flags & SO_OBJECTS) 8910 WARN_ON_ONCE(1); 8911 else 8912 x = data_race(slab->slabs); 8913 total += x; 8914 nodes[node] += x; 8915 } 8916 #endif 8917 } 8918 } 8919 8920 /* 8921 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 8922 * already held which will conflict with an existing lock order: 8923 * 8924 * mem_hotplug_lock->slab_mutex->kernfs_mutex 8925 * 8926 * We don't really need mem_hotplug_lock (to hold off 8927 * slab_mem_going_offline_callback) here because slab's memory hot 8928 * unplug code doesn't destroy the kmem_cache->node[] data. 
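 * The kmem_cache_node pointers read below therefore remain valid even
 * without the lock; the per-node counts are an approximate snapshot
 * anyway, since allocations and frees can proceed concurrently.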
8929 */ 8930 8931 #ifdef CONFIG_SLUB_DEBUG 8932 if (flags & SO_ALL) { 8933 struct kmem_cache_node *n; 8934 8935 for_each_kmem_cache_node(s, node, n) { 8936 8937 if (flags & SO_TOTAL) 8938 x = node_nr_objs(n); 8939 else if (flags & SO_OBJECTS) 8940 x = node_nr_objs(n) - count_partial(n, count_free); 8941 else 8942 x = node_nr_slabs(n); 8943 total += x; 8944 nodes[node] += x; 8945 } 8946 8947 } else 8948 #endif 8949 if (flags & SO_PARTIAL) { 8950 struct kmem_cache_node *n; 8951 8952 for_each_kmem_cache_node(s, node, n) { 8953 if (flags & SO_TOTAL) 8954 x = count_partial(n, count_total); 8955 else if (flags & SO_OBJECTS) 8956 x = count_partial(n, count_inuse); 8957 else 8958 x = n->nr_partial; 8959 total += x; 8960 nodes[node] += x; 8961 } 8962 } 8963 8964 len += sysfs_emit_at(buf, len, "%lu", total); 8965 #ifdef CONFIG_NUMA 8966 for (node = 0; node < nr_node_ids; node++) { 8967 if (nodes[node]) 8968 len += sysfs_emit_at(buf, len, " N%d=%lu", 8969 node, nodes[node]); 8970 } 8971 #endif 8972 len += sysfs_emit_at(buf, len, "\n"); 8973 kfree(nodes); 8974 8975 return len; 8976 } 8977 8978 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 8979 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 8980 8981 struct slab_attribute { 8982 struct attribute attr; 8983 ssize_t (*show)(struct kmem_cache *s, char *buf); 8984 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 8985 }; 8986 8987 #define SLAB_ATTR_RO(_name) \ 8988 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 8989 8990 #define SLAB_ATTR(_name) \ 8991 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 8992 8993 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 8994 { 8995 return sysfs_emit(buf, "%u\n", s->size); 8996 } 8997 SLAB_ATTR_RO(slab_size); 8998 8999 static ssize_t align_show(struct kmem_cache *s, char *buf) 9000 { 9001 return sysfs_emit(buf, "%u\n", s->align); 9002 } 9003 SLAB_ATTR_RO(align); 9004 9005 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 9006 { 9007 return sysfs_emit(buf, "%u\n", s->object_size); 9008 } 9009 SLAB_ATTR_RO(object_size); 9010 9011 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 9012 { 9013 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 9014 } 9015 SLAB_ATTR_RO(objs_per_slab); 9016 9017 static ssize_t order_show(struct kmem_cache *s, char *buf) 9018 { 9019 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 9020 } 9021 SLAB_ATTR_RO(order); 9022 9023 static ssize_t sheaf_capacity_show(struct kmem_cache *s, char *buf) 9024 { 9025 return sysfs_emit(buf, "%u\n", s->sheaf_capacity); 9026 } 9027 SLAB_ATTR_RO(sheaf_capacity); 9028 9029 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 9030 { 9031 return sysfs_emit(buf, "%lu\n", s->min_partial); 9032 } 9033 9034 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 9035 size_t length) 9036 { 9037 unsigned long min; 9038 int err; 9039 9040 err = kstrtoul(buf, 10, &min); 9041 if (err) 9042 return err; 9043 9044 s->min_partial = min; 9045 return length; 9046 } 9047 SLAB_ATTR(min_partial); 9048 9049 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 9050 { 9051 unsigned int nr_partial = 0; 9052 #ifdef CONFIG_SLUB_CPU_PARTIAL 9053 nr_partial = s->cpu_partial; 9054 #endif 9055 9056 return sysfs_emit(buf, "%u\n", nr_partial); 9057 } 9058 9059 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 9060 size_t length) 9061 { 9062 unsigned int objects; 9063 int err; 9064 9065 
err = kstrtouint(buf, 10, &objects); 9066 if (err) 9067 return err; 9068 if (objects && !kmem_cache_has_cpu_partial(s)) 9069 return -EINVAL; 9070 9071 slub_set_cpu_partial(s, objects); 9072 flush_all(s); 9073 return length; 9074 } 9075 SLAB_ATTR(cpu_partial); 9076 9077 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 9078 { 9079 if (!s->ctor) 9080 return 0; 9081 return sysfs_emit(buf, "%pS\n", s->ctor); 9082 } 9083 SLAB_ATTR_RO(ctor); 9084 9085 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 9086 { 9087 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 9088 } 9089 SLAB_ATTR_RO(aliases); 9090 9091 static ssize_t partial_show(struct kmem_cache *s, char *buf) 9092 { 9093 return show_slab_objects(s, buf, SO_PARTIAL); 9094 } 9095 SLAB_ATTR_RO(partial); 9096 9097 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 9098 { 9099 return show_slab_objects(s, buf, SO_CPU); 9100 } 9101 SLAB_ATTR_RO(cpu_slabs); 9102 9103 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 9104 { 9105 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 9106 } 9107 SLAB_ATTR_RO(objects_partial); 9108 9109 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 9110 { 9111 int objects = 0; 9112 int slabs = 0; 9113 int cpu __maybe_unused; 9114 int len = 0; 9115 9116 #ifdef CONFIG_SLUB_CPU_PARTIAL 9117 for_each_online_cpu(cpu) { 9118 struct slab *slab; 9119 9120 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 9121 9122 if (slab) 9123 slabs += data_race(slab->slabs); 9124 } 9125 #endif 9126 9127 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 9128 objects = (slabs * oo_objects(s->oo)) / 2; 9129 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 9130 9131 #ifdef CONFIG_SLUB_CPU_PARTIAL 9132 for_each_online_cpu(cpu) { 9133 struct slab *slab; 9134 9135 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 9136 if (slab) { 9137 slabs = data_race(slab->slabs); 9138 objects = (slabs * oo_objects(s->oo)) / 2; 9139 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 9140 cpu, objects, slabs); 9141 } 9142 } 9143 #endif 9144 len += sysfs_emit_at(buf, len, "\n"); 9145 9146 return len; 9147 } 9148 SLAB_ATTR_RO(slabs_cpu_partial); 9149 9150 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 9151 { 9152 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 9153 } 9154 SLAB_ATTR_RO(reclaim_account); 9155 9156 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 9157 { 9158 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 9159 } 9160 SLAB_ATTR_RO(hwcache_align); 9161 9162 #ifdef CONFIG_ZONE_DMA 9163 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 9164 { 9165 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 9166 } 9167 SLAB_ATTR_RO(cache_dma); 9168 #endif 9169 9170 #ifdef CONFIG_HARDENED_USERCOPY 9171 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 9172 { 9173 return sysfs_emit(buf, "%u\n", s->usersize); 9174 } 9175 SLAB_ATTR_RO(usersize); 9176 #endif 9177 9178 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 9179 { 9180 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 9181 } 9182 SLAB_ATTR_RO(destroy_by_rcu); 9183 9184 #ifdef CONFIG_SLUB_DEBUG 9185 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 9186 { 9187 return show_slab_objects(s, buf, SO_ALL); 9188 } 9189 SLAB_ATTR_RO(slabs); 9190 9191 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 9192 { 
9193 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 9194 } 9195 SLAB_ATTR_RO(total_objects); 9196 9197 static ssize_t objects_show(struct kmem_cache *s, char *buf) 9198 { 9199 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 9200 } 9201 SLAB_ATTR_RO(objects); 9202 9203 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 9204 { 9205 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 9206 } 9207 SLAB_ATTR_RO(sanity_checks); 9208 9209 static ssize_t trace_show(struct kmem_cache *s, char *buf) 9210 { 9211 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 9212 } 9213 SLAB_ATTR_RO(trace); 9214 9215 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 9216 { 9217 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 9218 } 9219 9220 SLAB_ATTR_RO(red_zone); 9221 9222 static ssize_t poison_show(struct kmem_cache *s, char *buf) 9223 { 9224 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 9225 } 9226 9227 SLAB_ATTR_RO(poison); 9228 9229 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 9230 { 9231 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 9232 } 9233 9234 SLAB_ATTR_RO(store_user); 9235 9236 static ssize_t validate_show(struct kmem_cache *s, char *buf) 9237 { 9238 return 0; 9239 } 9240 9241 static ssize_t validate_store(struct kmem_cache *s, 9242 const char *buf, size_t length) 9243 { 9244 int ret = -EINVAL; 9245 9246 if (buf[0] == '1' && kmem_cache_debug(s)) { 9247 ret = validate_slab_cache(s); 9248 if (ret >= 0) 9249 ret = length; 9250 } 9251 return ret; 9252 } 9253 SLAB_ATTR(validate); 9254 9255 #endif /* CONFIG_SLUB_DEBUG */ 9256 9257 #ifdef CONFIG_FAILSLAB 9258 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 9259 { 9260 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 9261 } 9262 9263 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 9264 size_t length) 9265 { 9266 if (s->refcount > 1) 9267 return -EINVAL; 9268 9269 if (buf[0] == '1') 9270 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); 9271 else 9272 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); 9273 9274 return length; 9275 } 9276 SLAB_ATTR(failslab); 9277 #endif 9278 9279 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 9280 { 9281 return 0; 9282 } 9283 9284 static ssize_t shrink_store(struct kmem_cache *s, 9285 const char *buf, size_t length) 9286 { 9287 if (buf[0] == '1') 9288 kmem_cache_shrink(s); 9289 else 9290 return -EINVAL; 9291 return length; 9292 } 9293 SLAB_ATTR(shrink); 9294 9295 #ifdef CONFIG_NUMA 9296 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 9297 { 9298 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 9299 } 9300 9301 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 9302 const char *buf, size_t length) 9303 { 9304 unsigned int ratio; 9305 int err; 9306 9307 err = kstrtouint(buf, 10, &ratio); 9308 if (err) 9309 return err; 9310 if (ratio > 100) 9311 return -ERANGE; 9312 9313 s->remote_node_defrag_ratio = ratio * 10; 9314 9315 return length; 9316 } 9317 SLAB_ATTR(remote_node_defrag_ratio); 9318 #endif 9319 9320 #ifdef CONFIG_SLUB_STATS 9321 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 9322 { 9323 unsigned long sum = 0; 9324 int cpu; 9325 int len = 0; 9326 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 9327 9328 if (!data) 9329 return -ENOMEM; 9330 9331 for_each_online_cpu(cpu) { 9332 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 9333 
9334 data[cpu] = x; 9335 sum += x; 9336 } 9337 9338 len += sysfs_emit_at(buf, len, "%lu", sum); 9339 9340 #ifdef CONFIG_SMP 9341 for_each_online_cpu(cpu) { 9342 if (data[cpu]) 9343 len += sysfs_emit_at(buf, len, " C%d=%u", 9344 cpu, data[cpu]); 9345 } 9346 #endif 9347 kfree(data); 9348 len += sysfs_emit_at(buf, len, "\n"); 9349 9350 return len; 9351 } 9352 9353 static void clear_stat(struct kmem_cache *s, enum stat_item si) 9354 { 9355 int cpu; 9356 9357 for_each_online_cpu(cpu) 9358 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 9359 } 9360 9361 #define STAT_ATTR(si, text) \ 9362 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 9363 { \ 9364 return show_stat(s, buf, si); \ 9365 } \ 9366 static ssize_t text##_store(struct kmem_cache *s, \ 9367 const char *buf, size_t length) \ 9368 { \ 9369 if (buf[0] != '0') \ 9370 return -EINVAL; \ 9371 clear_stat(s, si); \ 9372 return length; \ 9373 } \ 9374 SLAB_ATTR(text); \ 9375 9376 STAT_ATTR(ALLOC_PCS, alloc_cpu_sheaf); 9377 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 9378 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 9379 STAT_ATTR(FREE_PCS, free_cpu_sheaf); 9380 STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf); 9381 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail); 9382 STAT_ATTR(FREE_FASTPATH, free_fastpath); 9383 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 9384 STAT_ATTR(FREE_FROZEN, free_frozen); 9385 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 9386 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 9387 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 9388 STAT_ATTR(ALLOC_SLAB, alloc_slab); 9389 STAT_ATTR(ALLOC_REFILL, alloc_refill); 9390 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 9391 STAT_ATTR(FREE_SLAB, free_slab); 9392 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 9393 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 9394 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 9395 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 9396 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 9397 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 9398 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 9399 STAT_ATTR(ORDER_FALLBACK, order_fallback); 9400 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 9401 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 9402 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 9403 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 9404 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 9405 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 9406 STAT_ATTR(SHEAF_FLUSH, sheaf_flush); 9407 STAT_ATTR(SHEAF_REFILL, sheaf_refill); 9408 STAT_ATTR(SHEAF_ALLOC, sheaf_alloc); 9409 STAT_ATTR(SHEAF_FREE, sheaf_free); 9410 STAT_ATTR(BARN_GET, barn_get); 9411 STAT_ATTR(BARN_GET_FAIL, barn_get_fail); 9412 STAT_ATTR(BARN_PUT, barn_put); 9413 STAT_ATTR(BARN_PUT_FAIL, barn_put_fail); 9414 STAT_ATTR(SHEAF_PREFILL_FAST, sheaf_prefill_fast); 9415 STAT_ATTR(SHEAF_PREFILL_SLOW, sheaf_prefill_slow); 9416 STAT_ATTR(SHEAF_PREFILL_OVERSIZE, sheaf_prefill_oversize); 9417 STAT_ATTR(SHEAF_RETURN_FAST, sheaf_return_fast); 9418 STAT_ATTR(SHEAF_RETURN_SLOW, sheaf_return_slow); 9419 #endif /* CONFIG_SLUB_STATS */ 9420 9421 #ifdef CONFIG_KFENCE 9422 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) 9423 { 9424 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); 9425 } 9426 9427 static ssize_t skip_kfence_store(struct kmem_cache *s, 9428 const char *buf, size_t length) 9429 { 9430 int ret = length; 9431 9432 if (buf[0] == '0') 9433 s->flags &= ~SLAB_SKIP_KFENCE; 9434 else if (buf[0] == '1') 9435 s->flags |= 
SLAB_SKIP_KFENCE; 9436 else 9437 ret = -EINVAL; 9438 9439 return ret; 9440 } 9441 SLAB_ATTR(skip_kfence); 9442 #endif 9443 9444 static struct attribute *slab_attrs[] = { 9445 &slab_size_attr.attr, 9446 &object_size_attr.attr, 9447 &objs_per_slab_attr.attr, 9448 &order_attr.attr, 9449 &sheaf_capacity_attr.attr, 9450 &min_partial_attr.attr, 9451 &cpu_partial_attr.attr, 9452 &objects_partial_attr.attr, 9453 &partial_attr.attr, 9454 &cpu_slabs_attr.attr, 9455 &ctor_attr.attr, 9456 &aliases_attr.attr, 9457 &align_attr.attr, 9458 &hwcache_align_attr.attr, 9459 &reclaim_account_attr.attr, 9460 &destroy_by_rcu_attr.attr, 9461 &shrink_attr.attr, 9462 &slabs_cpu_partial_attr.attr, 9463 #ifdef CONFIG_SLUB_DEBUG 9464 &total_objects_attr.attr, 9465 &objects_attr.attr, 9466 &slabs_attr.attr, 9467 &sanity_checks_attr.attr, 9468 &trace_attr.attr, 9469 &red_zone_attr.attr, 9470 &poison_attr.attr, 9471 &store_user_attr.attr, 9472 &validate_attr.attr, 9473 #endif 9474 #ifdef CONFIG_ZONE_DMA 9475 &cache_dma_attr.attr, 9476 #endif 9477 #ifdef CONFIG_NUMA 9478 &remote_node_defrag_ratio_attr.attr, 9479 #endif 9480 #ifdef CONFIG_SLUB_STATS 9481 &alloc_cpu_sheaf_attr.attr, 9482 &alloc_fastpath_attr.attr, 9483 &alloc_slowpath_attr.attr, 9484 &free_cpu_sheaf_attr.attr, 9485 &free_rcu_sheaf_attr.attr, 9486 &free_rcu_sheaf_fail_attr.attr, 9487 &free_fastpath_attr.attr, 9488 &free_slowpath_attr.attr, 9489 &free_frozen_attr.attr, 9490 &free_add_partial_attr.attr, 9491 &free_remove_partial_attr.attr, 9492 &alloc_from_partial_attr.attr, 9493 &alloc_slab_attr.attr, 9494 &alloc_refill_attr.attr, 9495 &alloc_node_mismatch_attr.attr, 9496 &free_slab_attr.attr, 9497 &cpuslab_flush_attr.attr, 9498 &deactivate_full_attr.attr, 9499 &deactivate_empty_attr.attr, 9500 &deactivate_to_head_attr.attr, 9501 &deactivate_to_tail_attr.attr, 9502 &deactivate_remote_frees_attr.attr, 9503 &deactivate_bypass_attr.attr, 9504 &order_fallback_attr.attr, 9505 &cmpxchg_double_fail_attr.attr, 9506 &cmpxchg_double_cpu_fail_attr.attr, 9507 &cpu_partial_alloc_attr.attr, 9508 &cpu_partial_free_attr.attr, 9509 &cpu_partial_node_attr.attr, 9510 &cpu_partial_drain_attr.attr, 9511 &sheaf_flush_attr.attr, 9512 &sheaf_refill_attr.attr, 9513 &sheaf_alloc_attr.attr, 9514 &sheaf_free_attr.attr, 9515 &barn_get_attr.attr, 9516 &barn_get_fail_attr.attr, 9517 &barn_put_attr.attr, 9518 &barn_put_fail_attr.attr, 9519 &sheaf_prefill_fast_attr.attr, 9520 &sheaf_prefill_slow_attr.attr, 9521 &sheaf_prefill_oversize_attr.attr, 9522 &sheaf_return_fast_attr.attr, 9523 &sheaf_return_slow_attr.attr, 9524 #endif 9525 #ifdef CONFIG_FAILSLAB 9526 &failslab_attr.attr, 9527 #endif 9528 #ifdef CONFIG_HARDENED_USERCOPY 9529 &usersize_attr.attr, 9530 #endif 9531 #ifdef CONFIG_KFENCE 9532 &skip_kfence_attr.attr, 9533 #endif 9534 9535 NULL 9536 }; 9537 9538 static const struct attribute_group slab_attr_group = { 9539 .attrs = slab_attrs, 9540 }; 9541 9542 static ssize_t slab_attr_show(struct kobject *kobj, 9543 struct attribute *attr, 9544 char *buf) 9545 { 9546 struct slab_attribute *attribute; 9547 struct kmem_cache *s; 9548 9549 attribute = to_slab_attr(attr); 9550 s = to_slab(kobj); 9551 9552 if (!attribute->show) 9553 return -EIO; 9554 9555 return attribute->show(s, buf); 9556 } 9557 9558 static ssize_t slab_attr_store(struct kobject *kobj, 9559 struct attribute *attr, 9560 const char *buf, size_t len) 9561 { 9562 struct slab_attribute *attribute; 9563 struct kmem_cache *s; 9564 9565 attribute = to_slab_attr(attr); 9566 s = to_slab(kobj); 9567 9568 if (!attribute->store) 9569 
return -EIO; 9570 9571 return attribute->store(s, buf, len); 9572 } 9573 9574 static void kmem_cache_release(struct kobject *k) 9575 { 9576 slab_kmem_cache_release(to_slab(k)); 9577 } 9578 9579 static const struct sysfs_ops slab_sysfs_ops = { 9580 .show = slab_attr_show, 9581 .store = slab_attr_store, 9582 }; 9583 9584 static const struct kobj_type slab_ktype = { 9585 .sysfs_ops = &slab_sysfs_ops, 9586 .release = kmem_cache_release, 9587 }; 9588 9589 static struct kset *slab_kset; 9590 9591 static inline struct kset *cache_kset(struct kmem_cache *s) 9592 { 9593 return slab_kset; 9594 } 9595 9596 #define ID_STR_LENGTH 32 9597 9598 /* Create a unique string id for a slab cache: 9599 * 9600 * Format :[flags-]size 9601 */ 9602 static char *create_unique_id(struct kmem_cache *s) 9603 { 9604 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 9605 char *p = name; 9606 9607 if (!name) 9608 return ERR_PTR(-ENOMEM); 9609 9610 *p++ = ':'; 9611 /* 9612 * First flags affecting slabcache operations. We will only 9613 * get here for aliasable slabs so we do not need to support 9614 * too many flags. The flags here must cover all flags that 9615 * are matched during merging to guarantee that the id is 9616 * unique. 9617 */ 9618 if (s->flags & SLAB_CACHE_DMA) 9619 *p++ = 'd'; 9620 if (s->flags & SLAB_CACHE_DMA32) 9621 *p++ = 'D'; 9622 if (s->flags & SLAB_RECLAIM_ACCOUNT) 9623 *p++ = 'a'; 9624 if (s->flags & SLAB_CONSISTENCY_CHECKS) 9625 *p++ = 'F'; 9626 if (s->flags & SLAB_ACCOUNT) 9627 *p++ = 'A'; 9628 if (p != name + 1) 9629 *p++ = '-'; 9630 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); 9631 9632 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { 9633 kfree(name); 9634 return ERR_PTR(-EINVAL); 9635 } 9636 kmsan_unpoison_memory(name, p - name); 9637 return name; 9638 } 9639 9640 static int sysfs_slab_add(struct kmem_cache *s) 9641 { 9642 int err; 9643 const char *name; 9644 struct kset *kset = cache_kset(s); 9645 int unmergeable = slab_unmergeable(s); 9646 9647 if (!unmergeable && disable_higher_order_debug && 9648 (slub_debug & DEBUG_METADATA_FLAGS)) 9649 unmergeable = 1; 9650 9651 if (unmergeable) { 9652 /* 9653 * Slabcache can never be merged so we can use the name proper. 9654 * This is typically the case for debug situations. In that 9655 * case we can catch duplicate names easily. 9656 */ 9657 sysfs_remove_link(&slab_kset->kobj, s->name); 9658 name = s->name; 9659 } else { 9660 /* 9661 * Create a unique name for the slab as a target 9662 * for the symlinks. 9663 */ 9664 name = create_unique_id(s); 9665 if (IS_ERR(name)) 9666 return PTR_ERR(name); 9667 } 9668 9669 s->kobj.kset = kset; 9670 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 9671 if (err) 9672 goto out; 9673 9674 err = sysfs_create_group(&s->kobj, &slab_attr_group); 9675 if (err) 9676 goto out_del_kobj; 9677 9678 if (!unmergeable) { 9679 /* Setup first alias */ 9680 sysfs_slab_alias(s, s->name); 9681 } 9682 out: 9683 if (!unmergeable) 9684 kfree(name); 9685 return err; 9686 out_del_kobj: 9687 kobject_del(&s->kobj); 9688 goto out; 9689 } 9690 9691 void sysfs_slab_unlink(struct kmem_cache *s) 9692 { 9693 if (s->kobj.state_in_sysfs) 9694 kobject_del(&s->kobj); 9695 } 9696 9697 void sysfs_slab_release(struct kmem_cache *s) 9698 { 9699 kobject_put(&s->kobj); 9700 } 9701 9702 /* 9703 * Need to buffer aliases during bootup until sysfs becomes 9704 * available lest we lose that information. 
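 *
 * For example, a mergeable cache created before slab_sysfs_init() has
 * run only gets its alias name queued on the alias_list below;
 * slab_sysfs_init() later switches slab_state to FULL and converts
 * each queued entry into a sysfs symlink.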
9705 */ 9706 struct saved_alias { 9707 struct kmem_cache *s; 9708 const char *name; 9709 struct saved_alias *next; 9710 }; 9711 9712 static struct saved_alias *alias_list; 9713 9714 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 9715 { 9716 struct saved_alias *al; 9717 9718 if (slab_state == FULL) { 9719 /* 9720 * If we have a leftover link then remove it. 9721 */ 9722 sysfs_remove_link(&slab_kset->kobj, name); 9723 /* 9724 * The original cache may have failed to generate sysfs file. 9725 * In that case, sysfs_create_link() returns -ENOENT and 9726 * symbolic link creation is skipped. 9727 */ 9728 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 9729 } 9730 9731 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 9732 if (!al) 9733 return -ENOMEM; 9734 9735 al->s = s; 9736 al->name = name; 9737 al->next = alias_list; 9738 alias_list = al; 9739 kmsan_unpoison_memory(al, sizeof(*al)); 9740 return 0; 9741 } 9742 9743 static int __init slab_sysfs_init(void) 9744 { 9745 struct kmem_cache *s; 9746 int err; 9747 9748 mutex_lock(&slab_mutex); 9749 9750 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 9751 if (!slab_kset) { 9752 mutex_unlock(&slab_mutex); 9753 pr_err("Cannot register slab subsystem.\n"); 9754 return -ENOMEM; 9755 } 9756 9757 slab_state = FULL; 9758 9759 list_for_each_entry(s, &slab_caches, list) { 9760 err = sysfs_slab_add(s); 9761 if (err) 9762 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 9763 s->name); 9764 } 9765 9766 while (alias_list) { 9767 struct saved_alias *al = alias_list; 9768 9769 alias_list = alias_list->next; 9770 err = sysfs_slab_alias(al->s, al->name); 9771 if (err) 9772 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 9773 al->name); 9774 kfree(al); 9775 } 9776 9777 mutex_unlock(&slab_mutex); 9778 return 0; 9779 } 9780 late_initcall(slab_sysfs_init); 9781 #endif /* SLAB_SUPPORTS_SYSFS */ 9782 9783 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 9784 static int slab_debugfs_show(struct seq_file *seq, void *v) 9785 { 9786 struct loc_track *t = seq->private; 9787 struct location *l; 9788 unsigned long idx; 9789 9790 idx = (unsigned long) t->idx; 9791 if (idx < t->count) { 9792 l = &t->loc[idx]; 9793 9794 seq_printf(seq, "%7ld ", l->count); 9795 9796 if (l->addr) 9797 seq_printf(seq, "%pS", (void *)l->addr); 9798 else 9799 seq_puts(seq, "<not-available>"); 9800 9801 if (l->waste) 9802 seq_printf(seq, " waste=%lu/%lu", 9803 l->count * l->waste, l->waste); 9804 9805 if (l->sum_time != l->min_time) { 9806 seq_printf(seq, " age=%ld/%llu/%ld", 9807 l->min_time, div_u64(l->sum_time, l->count), 9808 l->max_time); 9809 } else 9810 seq_printf(seq, " age=%ld", l->min_time); 9811 9812 if (l->min_pid != l->max_pid) 9813 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 9814 else 9815 seq_printf(seq, " pid=%ld", 9816 l->min_pid); 9817 9818 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 9819 seq_printf(seq, " cpus=%*pbl", 9820 cpumask_pr_args(to_cpumask(l->cpus))); 9821 9822 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 9823 seq_printf(seq, " nodes=%*pbl", 9824 nodemask_pr_args(&l->nodes)); 9825 9826 #ifdef CONFIG_STACKDEPOT 9827 { 9828 depot_stack_handle_t handle; 9829 unsigned long *entries; 9830 unsigned int nr_entries, j; 9831 9832 handle = READ_ONCE(l->handle); 9833 if (handle) { 9834 nr_entries = stack_depot_fetch(handle, &entries); 9835 seq_puts(seq, "\n"); 9836 for (j = 0; j < nr_entries; j++) 9837 seq_printf(seq, " %pS\n", (void *)entries[j]); 9838 } 9839 } 9840 #endif 
9841 seq_puts(seq, "\n"); 9842 } 9843 9844 if (!idx && !t->count) 9845 seq_puts(seq, "No data\n"); 9846 9847 return 0; 9848 } 9849 9850 static void slab_debugfs_stop(struct seq_file *seq, void *v) 9851 { 9852 } 9853 9854 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 9855 { 9856 struct loc_track *t = seq->private; 9857 9858 t->idx = ++(*ppos); 9859 if (*ppos <= t->count) 9860 return ppos; 9861 9862 return NULL; 9863 } 9864 9865 static int cmp_loc_by_count(const void *a, const void *b) 9866 { 9867 struct location *loc1 = (struct location *)a; 9868 struct location *loc2 = (struct location *)b; 9869 9870 return cmp_int(loc2->count, loc1->count); 9871 } 9872 9873 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 9874 { 9875 struct loc_track *t = seq->private; 9876 9877 t->idx = *ppos; 9878 return ppos; 9879 } 9880 9881 static const struct seq_operations slab_debugfs_sops = { 9882 .start = slab_debugfs_start, 9883 .next = slab_debugfs_next, 9884 .stop = slab_debugfs_stop, 9885 .show = slab_debugfs_show, 9886 }; 9887 9888 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 9889 { 9890 9891 struct kmem_cache_node *n; 9892 enum track_item alloc; 9893 int node; 9894 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 9895 sizeof(struct loc_track)); 9896 struct kmem_cache *s = file_inode(filep)->i_private; 9897 unsigned long *obj_map; 9898 9899 if (!t) 9900 return -ENOMEM; 9901 9902 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 9903 if (!obj_map) { 9904 seq_release_private(inode, filep); 9905 return -ENOMEM; 9906 } 9907 9908 alloc = debugfs_get_aux_num(filep); 9909 9910 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 9911 bitmap_free(obj_map); 9912 seq_release_private(inode, filep); 9913 return -ENOMEM; 9914 } 9915 9916 for_each_kmem_cache_node(s, node, n) { 9917 unsigned long flags; 9918 struct slab *slab; 9919 9920 if (!node_nr_slabs(n)) 9921 continue; 9922 9923 spin_lock_irqsave(&n->list_lock, flags); 9924 list_for_each_entry(slab, &n->partial, slab_list) 9925 process_slab(t, s, slab, alloc, obj_map); 9926 list_for_each_entry(slab, &n->full, slab_list) 9927 process_slab(t, s, slab, alloc, obj_map); 9928 spin_unlock_irqrestore(&n->list_lock, flags); 9929 } 9930 9931 /* Sort locations by count */ 9932 sort(t->loc, t->count, sizeof(struct location), 9933 cmp_loc_by_count, NULL); 9934 9935 bitmap_free(obj_map); 9936 return 0; 9937 } 9938 9939 static int slab_debug_trace_release(struct inode *inode, struct file *file) 9940 { 9941 struct seq_file *seq = file->private_data; 9942 struct loc_track *t = seq->private; 9943 9944 free_loc_track(t); 9945 return seq_release_private(inode, file); 9946 } 9947 9948 static const struct file_operations slab_debugfs_fops = { 9949 .open = slab_debug_trace_open, 9950 .read = seq_read, 9951 .llseek = seq_lseek, 9952 .release = slab_debug_trace_release, 9953 }; 9954 9955 static void debugfs_slab_add(struct kmem_cache *s) 9956 { 9957 struct dentry *slab_cache_dir; 9958 9959 if (unlikely(!slab_debugfs_root)) 9960 return; 9961 9962 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 9963 9964 debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s, 9965 TRACK_ALLOC, &slab_debugfs_fops); 9966 9967 debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s, 9968 TRACK_FREE, &slab_debugfs_fops); 9969 } 9970 9971 void debugfs_slab_release(struct kmem_cache *s) 9972 { 9973 debugfs_lookup_and_remove(s->name, slab_debugfs_root); 
9974 } 9975 9976 static int __init slab_debugfs_init(void) 9977 { 9978 struct kmem_cache *s; 9979 9980 slab_debugfs_root = debugfs_create_dir("slab", NULL); 9981 9982 list_for_each_entry(s, &slab_caches, list) 9983 if (s->flags & SLAB_STORE_USER) 9984 debugfs_slab_add(s); 9985 9986 return 0; 9987 9988 } 9989 __initcall(slab_debugfs_init); 9990 #endif 9991 /* 9992 * The /proc/slabinfo ABI 9993 */ 9994 #ifdef CONFIG_SLUB_DEBUG 9995 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 9996 { 9997 unsigned long nr_slabs = 0; 9998 unsigned long nr_objs = 0; 9999 unsigned long nr_free = 0; 10000 int node; 10001 struct kmem_cache_node *n; 10002 10003 for_each_kmem_cache_node(s, node, n) { 10004 nr_slabs += node_nr_slabs(n); 10005 nr_objs += node_nr_objs(n); 10006 nr_free += count_partial_free_approx(n); 10007 } 10008 10009 sinfo->active_objs = nr_objs - nr_free; 10010 sinfo->num_objs = nr_objs; 10011 sinfo->active_slabs = nr_slabs; 10012 sinfo->num_slabs = nr_slabs; 10013 sinfo->objects_per_slab = oo_objects(s->oo); 10014 sinfo->cache_order = oo_order(s->oo); 10015 } 10016 #endif /* CONFIG_SLUB_DEBUG */ 10017
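/*
 * Example with illustrative numbers: a cache whose nodes together hold
 * 10 slabs of 32 objects each, with roughly 40 free objects sitting on
 * partial slabs, is reported as num_objs = 320 and active_objs = 280;
 * num_slabs and active_slabs are both the total slab count, and the
 * free-object count is itself only an approximation (see
 * count_partial_free_approx()).
 */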