// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* mm_account_reclaimed_pages() */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/node.h>
#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/kmemleak.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/sort.h>
#include <linux/irq_work.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
 *   4. slab_lock(slab) (Only on some arches)
 *   5. object_map_lock (Only for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *   Also synchronizes memory hotplug callbacks.
 *
 *   slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
 *
 *   The slab_lock is only used on arches that do not have the ability
 *   to do a cmpxchg_double. It only protects:
 *
 *	A. slab->freelist	-> List of free objects in a slab
 *	B. slab->inuse		-> Number of objects in use
 *	C. slab->objects	-> Number of objects in slab
 *	D. slab->frozen		-> frozen state
 *
 *   Frozen slabs
 *
 *   If a slab is frozen then it is exempt from list management. It is
 *   the cpu slab which is actively allocated from by the processor that
 *   froze it and it is not on any list. The processor that froze the
 *   slab is the one who can perform list operations on the slab. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   slab's freelist.
 *
 *   CPU partial slabs
 *
 *   The partially empty slabs cached on the CPU partial list are used
 *   for performance reasons, which speeds up the allocation process.
 *   These slabs are not frozen, but are also exempt from list management,
 *   by clearing the SL_partial flag when moving out of the node
 *   partial list. Please see __slab_free() for more details.
 *
 *   To sum up, the current scheme is:
 *   - node partial slab: SL_partial && !frozen
 *   - cpu partial slab:  !SL_partial && !frozen
 *   - cpu slab:          !SL_partial && frozen
 *   - full slab:         !SL_partial && !frozen
 *
 *   list_lock
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists, nor may the number of partial slabs be
 *   modified. (Note that the total number of slabs is an atomic value
 *   that may be modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   For debug caches, all allocations are forced to go through a list_lock
 *   protected region to serialize against concurrent validation.
 *
 *   cpu_slab->lock local lock
 *
 *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters. This is a percpu structure manipulated only by
 *   the local cpu, so the lock protects against being preempted or interrupted
 *   by an irq. Fast path operations rely on lockless operations instead.
 *
 *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption,
 *   which means the lockless fastpath cannot be used as it might interfere with
 *   an in-progress slow path operation. In this case the local lock is always
 *   taken but it still utilizes the freelist for the common operations.
 *
 *   lockless fastpaths
 *
 *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 *   are fully lockless when satisfied from the percpu slab (and when
 *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
 *   They also don't disable preemption or migration or irqs. They rely on
 *   the transaction id (tid) field to detect being preempted or moved to
 *   another cpu.
 *
 *   irq, preemption, migration considerations
 *
 *   Interrupts are disabled as part of list_lock or local_lock operations, or
 *   around the slab_lock operation, in order to make the slab allocator safe
 *   to use in the context of an irq.
 *
 *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
 *   doesn't have to be revalidated in each section protected by the local lock.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * slab->frozen		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

/**
 * enum slab_flags - How the slab flags bits are used.
 * @SL_locked: Is locked with slab_lock()
 * @SL_partial: On the per-node partial list
 * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
 *
 * The slab flags share space with the page flags but some bits have
 * different interpretations. The high bits are used for information
 * like zone/node/section.
 */
enum slab_flags {
	SL_locked = PG_locked,
	SL_partial = PG_workingset,	/* Historical reasons for this bit */
	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
};

/*
 * We could simply use migrate_disable()/enable() but as long as it's a
 * function call even on !PREEMPT_RT, use inline preempt_disable() there.
 */
#ifndef CONFIG_PREEMPT_RT
#define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
#define USE_LOCKLESS_FAST_PATH()	(true)
#else
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
#define USE_LOCKLESS_FAST_PATH()	(false)
#endif
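
#ifndef CONFIG_SLUB_TINY
/*
 * Illustrative sketch only, not part of the allocator: the intended usage
 * pattern of the slub_get_cpu_ptr()/slub_put_cpu_ptr() pair above, assuming
 * a !CONFIG_SLUB_TINY build where s->cpu_slab exists. On !PREEMPT_RT the
 * section runs with preemption disabled; on PREEMPT_RT only migration is
 * disabled, so the section may be preempted but keeps operating on the same
 * cpu's data.
 */
static __maybe_unused void slub_cpu_ptr_usage_sketch(struct kmem_cache *s)
{
	struct kmem_cache_cpu *c = slub_get_cpu_ptr(s->cpu_slab);

	/* ... use *c; the pointer stays valid for this whole section ... */

	slub_put_cpu_ptr(s->cpu_slab);
}
#endif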

#ifndef CONFIG_SLUB_TINY
#define __fastpath_inline __always_inline
#else
#define __fastpath_inline
#endif

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif	/* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_NUMA
static DEFINE_STATIC_KEY_FALSE(strict_numa);
#endif

/* Structure holding parameters for get_partial() call chain */
struct partial_context {
	gfp_t flags;
	unsigned int orig_size;
	void *object;
};

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

#ifndef CONFIG_SLUB_TINY
/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10
#else
#define MIN_PARTIAL 0
#define MAX_PARTIAL 0
#endif

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slab_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
/* Use cmpxchg_double */

#ifdef system_has_freelist_aba
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
#else
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef SLAB_SUPPORTS_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

enum stat_item {
	ALLOC_PCS,		/* Allocation from percpu sheaf */
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_PCS,		/* Free to percpu sheaf */
	FREE_RCU_SHEAF,		/* Free to rcu_free sheaf */
	FREE_RCU_SHEAF_FAIL,	/* Failed to free to a rcu_free sheaf */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	SHEAF_FLUSH,		/* Objects flushed from a sheaf */
	SHEAF_REFILL,		/* Objects refilled to a sheaf */
	SHEAF_ALLOC,		/* Allocation of an empty sheaf */
	SHEAF_FREE,		/* Freeing of an empty sheaf */
	BARN_GET,		/* Got full sheaf from barn */
	BARN_GET_FAIL,		/* Failed to get full sheaf from barn */
	BARN_PUT,		/* Put full sheaf to barn */
	BARN_PUT_FAIL,		/* Failed to put full sheaf to barn */
	SHEAF_PREFILL_FAST,	/* Sheaf prefill grabbed the spare sheaf */
	SHEAF_PREFILL_SLOW,	/* Sheaf prefill found no spare sheaf */
	SHEAF_PREFILL_OVERSIZE,	/* Allocation of oversize sheaf for prefill */
	SHEAF_RETURN_FAST,	/* Sheaf return reattached spare sheaf */
	SHEAF_RETURN_SLOW,	/* Sheaf return could not reattach spare */
	NR_SLUB_STAT_ITEMS
};

struct freelist_tid {
	union {
		struct {
			void *freelist;		/* Pointer to next available object */
			unsigned long tid;	/* Globally unique transaction id */
		};
		freelist_full_t freelist_tid;
	};
};

/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	struct freelist_tid;
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated slabs */
#endif
	local_trylock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
};

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

static inline
void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_add(s->cpu_slab->stat[si], v);
#endif
}

#define MAX_FULL_SHEAVES	10
#define MAX_EMPTY_SHEAVES	10

struct node_barn {
	spinlock_t lock;
	struct list_head sheaves_full;
	struct list_head sheaves_empty;
	unsigned int nr_full;
	unsigned int nr_empty;
};

struct slab_sheaf {
	union {
		struct rcu_head rcu_head;
		struct list_head barn_list;
		/* only used for prefilled sheafs */
		struct {
			unsigned int capacity;
			bool pfmemalloc;
		};
	};
	struct kmem_cache *cache;
	unsigned int size;
	int node;	/* only used for rcu_sheaf */
	void *objects[];
};

struct slub_percpu_sheaves {
	local_trylock_t lock;
	struct slab_sheaf *main;	/* never NULL when unlocked */
	struct slab_sheaf *spare;	/* empty or full, may be NULL */
	struct slab_sheaf *rcu_free;	/* for batching kfree_rcu() */
};

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
	struct node_barn *barn;
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Get the barn of the current cpu's closest memory node. It may not exist on
 * systems with memoryless nodes but without CONFIG_HAVE_MEMORYLESS_NODES.
 */
static inline struct node_barn *get_barn(struct kmem_cache *s)
{
	struct kmem_cache_node *n = get_node(s, numa_mem_id());

	if (!n)
		return NULL;

	return n->barn;
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
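
/*
 * Illustrative sketch only, not part of the allocator: a typical use of the
 * iterator above, summing nr_partial across all nodes of a cache. A real
 * caller would hold slab_mutex or tolerate a racy snapshot.
 */
static __maybe_unused unsigned long count_partial_slabs_sketch(struct kmem_cache *s)
{
	struct kmem_cache_node *n;
	unsigned long nr_partial = 0;
	int node;

	for_each_kmem_cache_node(s, node, n)
		nr_partial += n->nr_partial;

	return nr_partial;
}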

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

/*
 * Workqueue used for flush_cpu_slab().
 */
static struct workqueue_struct *flushwq;

struct slub_flush_work {
	struct work_struct work;
	struct kmem_cache *s;
	bool skip;
};

static DEFINE_MUTEX(flush_lock);
static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
					    void *ptr, unsigned long ptr_addr)
{
	unsigned long encoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
#else
	encoded = (unsigned long)ptr;
#endif
	return (freeptr_t){.v = encoded};
}

static inline void *freelist_ptr_decode(const struct kmem_cache *s,
					freeptr_t ptr, unsigned long ptr_addr)
{
	void *decoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
#else
	decoded = (void *)ptr.v;
#endif
	return decoded;
}
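
/*
 * Illustrative sketch only, not part of the allocator: encode and decode
 * are inverses for the same cache and slot address, which is all the
 * hardened freelist relies on. The values below are arbitrary examples.
 */
static __maybe_unused void freelist_ptr_roundtrip_sketch(struct kmem_cache *s)
{
	void *object = (void *)0x1234UL;	/* hypothetical next-free object */
	unsigned long slot = 0x5678UL;		/* address the pointer is stored at */
	freeptr_t enc = freelist_ptr_encode(s, object, slot);

	WARN_ON(freelist_ptr_decode(s, enc, slot) != object);
}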

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	unsigned long ptr_addr;
	freeptr_t p;

	object = kasan_reset_tag(object);
	ptr_addr = (unsigned long)object + s->offset;
	p = *(freeptr_t *)(ptr_addr);
	return freelist_ptr_decode(s, p, ptr_addr);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetchw(object + s->offset);
}

/*
 * When running under KMSAN, get_freepointer_safe() may return an uninitialized
 * pointer value in the case the current thread loses the race for the next
 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
 * slab_alloc_node() will fail, so the uninitialized value won't be used, but
 * KMSAN will still check all arguments of cmpxchg because of imperfect
 * handling of inline assembly.
 * To work around this problem, we apply __no_kmsan_checks to ensure that
 * get_freepointer_safe() returns initialized memory.
 */
__no_kmsan_checks
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	freeptr_t p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
	return freelist_ptr_decode(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
}

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)
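
/*
 * Illustrative sketch only, not part of the allocator: walking every object
 * slot in a slab with for_each_object(). fixup_red_left() makes the walk
 * start past the left redzone when SLAB_RED_ZONE is active.
 */
static __maybe_unused unsigned int count_object_slots_sketch(struct kmem_cache *s,
							     struct slab *slab)
{
	unsigned int nr = 0;
	void *p;

	for_each_object(p, s, slab_address(slab), slab->objects)
		nr++;

	return nr;	/* equals slab->objects */
}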

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						      unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
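
/*
 * Illustrative sketch only, not part of the allocator: order and object
 * count are packed into a single word. E.g. with 4K pages, oo_make(1, 256)
 * stores (1 << OO_SHIFT) + 32, so oo_order() yields 1 and oo_objects()
 * yields 32.
 */
static __maybe_unused void oo_pack_sketch(void)
{
	struct kmem_cache_order_objects oo = oo_make(1, 256);

	WARN_ON(oo_order(oo) != 1);
	WARN_ON(oo_objects(oo) != order_objects(1, 256));
}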

#ifdef CONFIG_SLUB_CPU_PARTIAL
static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
	unsigned int nr_slabs;

	s->cpu_partial = nr_objects;

	/*
	 * We take the number of objects but actually limit the number of
	 * slabs on the per cpu partial list, in order to limit excessive
	 * growth of the list. For simplicity we assume that the slabs will
	 * be half-full.
	 */
	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
	s->cpu_partial_slabs = nr_slabs;
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return s->cpu_partial_slabs;
}
#else
#ifdef SLAB_SUPPORTS_SYSFS
static inline void
slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
}
#endif

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * If network-based swap is enabled, slub must keep track of whether memory
 * was allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return test_bit(SL_pfmemalloc, &slab->flags.f);
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	set_bit(SL_pfmemalloc, &slab->flags.f);
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__clear_bit(SL_pfmemalloc, &slab->flags.f);
}

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct slab *slab)
{
	bit_spin_lock(SL_locked, &slab->flags.f);
}

static __always_inline void slab_unlock(struct slab *slab)
{
	bit_spin_unlock(SL_locked, &slab->flags.f);
}

static inline bool
__update_freelist_fast(struct slab *slab, struct freelist_counters *old,
		       struct freelist_counters *new)
{
#ifdef system_has_freelist_aba
	return try_cmpxchg_freelist(&slab->freelist_counters,
				    &old->freelist_counters,
				    new->freelist_counters);
#else
	return false;
#endif
}

static inline bool
__update_freelist_slow(struct slab *slab, struct freelist_counters *old,
		       struct freelist_counters *new)
{
	bool ret = false;

	slab_lock(slab);
	if (slab->freelist == old->freelist &&
	    slab->counters == old->counters) {
		slab->freelist = new->freelist;
		slab->counters = new->counters;
		ret = true;
	}
	slab_unlock(slab);

	return ret;
}

/*
 * Interrupts must be disabled (for the fallback code to work right), typically
 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
 * part of bit_spin_lock(), is sufficient because the policy is not to allow any
 * allocation/free operation in hardirq context. Therefore nothing can
 * interrupt the operation.
 */
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		struct freelist_counters *old, struct freelist_counters *new, const char *n)
{
	bool ret;

	if (USE_LOCKLESS_FAST_PATH())
		lockdep_assert_irqs_disabled();

	if (s->flags & __CMPXCHG_DOUBLE)
		ret = __update_freelist_fast(slab, old, new);
	else
		ret = __update_freelist_slow(slab, old, new);

	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		struct freelist_counters *old, struct freelist_counters *new, const char *n)
{
	bool ret;

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, old, new);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		ret = __update_freelist_slow(slab, old, new);
		local_irq_restore(flags);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
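
/*
 * Illustrative sketch only, not part of the allocator: the typical caller
 * pattern for slab_update_freelist(). A snapshot of freelist + counters is
 * taken, a new state is derived, and the update succeeds only if neither
 * changed in the meantime; on failure the caller retries. A real caller
 * would also adjust the inuse/frozen bits packed into counters.
 */
static __maybe_unused bool freelist_pop_sketch(struct kmem_cache *s,
					       struct slab *slab, void **object)
{
	struct freelist_counters old, new;

	do {
		old.freelist = slab->freelist;
		old.counters = slab->counters;
		if (!old.freelist)
			return false;

		new.freelist = get_freepointer(s, old.freelist);
		new.counters = old.counters;	/* real callers update inuse here */
	} while (!slab_update_freelist(s, slab, &old, &new, "freelist_pop_sketch"));

	*object = old.freelist;
	return true;
}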

/*
 * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API
 * family will round up the real request size to these fixed ones, so
 * there can be extra space beyond what was requested. Save the original
 * request size in the metadata area, for better debugging and sanity checks.
 */
static inline void set_orig_size(struct kmem_cache *s,
				 void *object, unsigned int orig_size)
{
	void *p = kasan_reset_tag(object);

	if (!slub_debug_orig_size(s))
		return;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	*(unsigned int *)p = orig_size;
}

static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
	void *p = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return kfence_ksize(object);

	if (!slub_debug_orig_size(s))
		return s->object_size;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	return *(unsigned int *)p;
}
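
/*
 * Illustrative sketch only, not part of the allocator: with slab_debug=U on
 * a kmalloc cache, a kmalloc(40, ...) served from kmalloc-64 records 40
 * behind the tracking data, so redzone checks can cover bytes 40..63.
 * Assumes @object is not a KFENCE address.
 */
static __maybe_unused void orig_size_sketch(struct kmem_cache *s, void *object)
{
	set_orig_size(s, object, 40);

	/* Reads back 40 when orig_size tracking is enabled for this cache. */
	WARN_ON(slub_debug_orig_size(s) && get_orig_size(s, object) != 40);
}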
print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); 1118 } 1119 1120 static void print_slab_info(const struct slab *slab) 1121 { 1122 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n", 1123 slab, slab->objects, slab->inuse, slab->freelist, 1124 &slab->flags.f); 1125 } 1126 1127 void skip_orig_size_check(struct kmem_cache *s, const void *object) 1128 { 1129 set_orig_size(s, (void *)object, s->object_size); 1130 } 1131 1132 static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp) 1133 { 1134 struct va_format vaf; 1135 va_list args; 1136 1137 va_copy(args, argsp); 1138 vaf.fmt = fmt; 1139 vaf.va = &args; 1140 pr_err("=============================================================================\n"); 1141 pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf); 1142 pr_err("-----------------------------------------------------------------------------\n\n"); 1143 va_end(args); 1144 } 1145 1146 static void slab_bug(struct kmem_cache *s, const char *fmt, ...) 1147 { 1148 va_list args; 1149 1150 va_start(args, fmt); 1151 __slab_bug(s, fmt, args); 1152 va_end(args); 1153 } 1154 1155 __printf(2, 3) 1156 static void slab_fix(struct kmem_cache *s, const char *fmt, ...) 1157 { 1158 struct va_format vaf; 1159 va_list args; 1160 1161 if (slab_add_kunit_errors()) 1162 return; 1163 1164 va_start(args, fmt); 1165 vaf.fmt = fmt; 1166 vaf.va = &args; 1167 pr_err("FIX %s: %pV\n", s->name, &vaf); 1168 va_end(args); 1169 } 1170 1171 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) 1172 { 1173 unsigned int off; /* Offset of last byte */ 1174 u8 *addr = slab_address(slab); 1175 1176 print_tracking(s, p); 1177 1178 print_slab_info(slab); 1179 1180 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n", 1181 p, p - addr, get_freepointer(s, p)); 1182 1183 if (s->flags & SLAB_RED_ZONE) 1184 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, 1185 s->red_left_pad); 1186 else if (p > addr + 16) 1187 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); 1188 1189 print_section(KERN_ERR, "Object ", p, 1190 min_t(unsigned int, s->object_size, PAGE_SIZE)); 1191 if (s->flags & SLAB_RED_ZONE) 1192 print_section(KERN_ERR, "Redzone ", p + s->object_size, 1193 s->inuse - s->object_size); 1194 1195 off = get_info_end(s); 1196 1197 if (s->flags & SLAB_STORE_USER) 1198 off += 2 * sizeof(struct track); 1199 1200 if (slub_debug_orig_size(s)) 1201 off += sizeof(unsigned int); 1202 1203 off += kasan_metadata_size(s, false); 1204 1205 if (off != size_from_object(s)) 1206 /* Beginning of the filler is the free pointer */ 1207 print_section(KERN_ERR, "Padding ", p + off, 1208 size_from_object(s) - off); 1209 } 1210 1211 static void object_err(struct kmem_cache *s, struct slab *slab, 1212 u8 *object, const char *reason) 1213 { 1214 if (slab_add_kunit_errors()) 1215 return; 1216 1217 slab_bug(s, reason); 1218 if (!object || !check_valid_pointer(s, slab, object)) { 1219 print_slab_info(slab); 1220 pr_err("Invalid pointer 0x%p\n", object); 1221 } else { 1222 print_trailer(s, slab, object); 1223 } 1224 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1225 1226 WARN_ON(1); 1227 } 1228 1229 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 1230 void **freelist, void *nextfree) 1231 { 1232 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && 1233 !check_valid_pointer(s, slab, nextfree) && freelist) { 1234 object_err(s, slab, *freelist, "Freechain corrupt"); 1235 *freelist = NULL; 1236 slab_fix(s, "Isolate corrupted freechain"); 1237 return true; 1238 } 

static struct track *get_track(struct kmem_cache *s, void *object,
			       enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

#ifdef CONFIG_STACKDEPOT
static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
{
	depot_stack_handle_t handle;
	unsigned long entries[TRACK_ADDRS_COUNT];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	handle = stack_depot_save(entries, nr_entries, gfp_flags);

	return handle;
}
#else
static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
{
	return 0;
}
#endif

static void set_track_update(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr,
			     depot_stack_handle_t handle)
{
	struct track *p = get_track(s, object, alloc);

#ifdef CONFIG_STACKDEPOT
	p->handle = handle;
#endif
	p->addr = addr;
	p->cpu = smp_processor_id();
	p->pid = current->pid;
	p->when = jiffies;
}

static __always_inline void set_track(struct kmem_cache *s, void *object,
				      enum track_item alloc, unsigned long addr,
				      gfp_t gfp_flags)
{
	depot_stack_handle_t handle = set_track_prepare(gfp_flags);

	set_track_update(s, object, alloc, addr, handle);
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	struct track *p;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	p = get_track(s, object, TRACK_ALLOC);
	memset(p, 0, 2*sizeof(struct track));
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	depot_stack_handle_t handle __maybe_unused;

	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKDEPOT
	handle = READ_ONCE(t->handle);
	if (handle)
		stack_depot_print(handle);
	else
		pr_err("object allocation/free stack trace missing\n");
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_slab_info(const struct slab *slab)
{
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
	       slab, slab->objects, slab->inuse, slab->freelist,
	       &slab->flags.f);
}

void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
	set_orig_size(s, (void *)object, s->object_size);
}

static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
{
	struct va_format vaf;
	va_list args;

	va_copy(args, argsp);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");
	va_end(args);
}

static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__slab_bug(s, fmt, args);
	va_end(args);
}

__printf(2, 3)
static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = slab_address(slab);

	print_tracking(s, p);

	print_slab_info(slab);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR,         "Object   ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
			      s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (slub_debug_orig_size(s))
		off += sizeof(unsigned int);

	off += kasan_metadata_size(s, false);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding  ", p + off,
			      size_from_object(s) - off);
}

static void object_err(struct kmem_cache *s, struct slab *slab,
		       u8 *object, const char *reason)
{
	if (slab_add_kunit_errors())
		return;

	slab_bug(s, reason);
	if (!object || !check_valid_pointer(s, slab, object)) {
		print_slab_info(slab);
		pr_err("Invalid pointer 0x%p\n", object);
	} else {
		print_trailer(s, slab, object);
	}
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

	WARN_ON(1);
}

static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, slab, nextfree) && freelist) {
		object_err(s, slab, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static void __slab_err(struct slab *slab)
{
	if (slab_in_kunit_test())
		return;

	print_slab_info(slab);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

	WARN_ON(1);
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
				    const char *fmt, ...)
{
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	__slab_bug(s, fmt, args);
	va_end(args);

	__slab_err(slab);
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);
	unsigned int poison_size = s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		/*
		 * Here and below, avoid overwriting the KMSAN shadow. Keeping
		 * the shadow makes it possible to distinguish uninit-value
		 * from use-after-free.
		 */
		memset_no_sanitize_memory(p - s->red_left_pad, val,
					  s->red_left_pad);

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			/*
			 * Redzone the extra allocated space by kmalloc than
			 * requested, and the poison size will be limited to
			 * the original request size accordingly.
			 */
			poison_size = get_orig_size(s, object);
		}
	}

	if (s->flags & __OBJECT_POISON) {
		memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
		memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
	}

	if (s->flags & SLAB_RED_ZONE)
		memset_no_sanitize_memory(p + poison_size, val,
					  s->inuse - poison_size);
}

static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
			  void *from, void *to)
{
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
	memset(from, data, to - from);
}

#ifdef CONFIG_KMSAN
#define pad_check_attributes noinline __no_kmsan_checks
#else
#define pad_check_attributes
#endif

static pad_check_attributes int
check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
		       u8 *object, const char *what, u8 *start, unsigned int value,
		       unsigned int bytes, bool slab_obj_print)
{
	u8 *fault;
	u8 *end;
	u8 *addr = slab_address(slab);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	if (slab_add_kunit_errors())
		goto skip_bug_print;

	pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
	       what, fault, end - 1, fault - addr, fault[0], value);

	if (slab_obj_print)
		object_err(s, slab, object, "Object corrupt");

skip_bug_print:
	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
 * 	0xcc (SLUB_RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 *	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
 *	D. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
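
/*
 * Illustrative sketch only, not part of the allocator: dump the boundaries
 * described in the layout comment above for a given cache. With
 * SLAB_RED_ZONE, red_left_pad sits before the object address, and
 * get_info_end() marks where the tracking/orig_size metadata begins.
 */
static __maybe_unused void print_layout_sketch(struct kmem_cache *s)
{
	pr_info("%s: object_size=%u inuse=%u offset=%u red_left_pad=%u info_end=%u size=%u\n",
		s->name, s->object_size, s->inuse, s->offset,
		s->red_left_pad, get_info_end(s), s->size);
}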
0x%p-0x%p @offset=%tu", 1440 fault, end - 1, fault - start); 1441 print_section(KERN_ERR, "Padding ", pad, remainder); 1442 __slab_err(slab); 1443 1444 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); 1445 } 1446 1447 static int check_object(struct kmem_cache *s, struct slab *slab, 1448 void *object, u8 val) 1449 { 1450 u8 *p = object; 1451 u8 *endobject = object + s->object_size; 1452 unsigned int orig_size, kasan_meta_size; 1453 int ret = 1; 1454 1455 if (s->flags & SLAB_RED_ZONE) { 1456 if (!check_bytes_and_report(s, slab, object, "Left Redzone", 1457 object - s->red_left_pad, val, s->red_left_pad, ret)) 1458 ret = 0; 1459 1460 if (!check_bytes_and_report(s, slab, object, "Right Redzone", 1461 endobject, val, s->inuse - s->object_size, ret)) 1462 ret = 0; 1463 1464 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { 1465 orig_size = get_orig_size(s, object); 1466 1467 if (s->object_size > orig_size && 1468 !check_bytes_and_report(s, slab, object, 1469 "kmalloc Redzone", p + orig_size, 1470 val, s->object_size - orig_size, ret)) { 1471 ret = 0; 1472 } 1473 } 1474 } else { 1475 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 1476 if (!check_bytes_and_report(s, slab, p, "Alignment padding", 1477 endobject, POISON_INUSE, 1478 s->inuse - s->object_size, ret)) 1479 ret = 0; 1480 } 1481 } 1482 1483 if (s->flags & SLAB_POISON) { 1484 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) { 1485 /* 1486 * KASAN can save its free meta data inside of the 1487 * object at offset 0. Thus, skip checking the part of 1488 * the redzone that overlaps with the meta data. 1489 */ 1490 kasan_meta_size = kasan_metadata_size(s, true); 1491 if (kasan_meta_size < s->object_size - 1 && 1492 !check_bytes_and_report(s, slab, p, "Poison", 1493 p + kasan_meta_size, POISON_FREE, 1494 s->object_size - kasan_meta_size - 1, ret)) 1495 ret = 0; 1496 if (kasan_meta_size < s->object_size && 1497 !check_bytes_and_report(s, slab, p, "End Poison", 1498 p + s->object_size - 1, POISON_END, 1, ret)) 1499 ret = 0; 1500 } 1501 /* 1502 * check_pad_bytes cleans up on its own. 1503 */ 1504 if (!check_pad_bytes(s, slab, p)) 1505 ret = 0; 1506 } 1507 1508 /* 1509 * Cannot check freepointer while object is allocated if 1510 * object and freepointer overlap. 1511 */ 1512 if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) && 1513 !check_valid_pointer(s, slab, get_freepointer(s, p))) { 1514 object_err(s, slab, p, "Freepointer corrupt"); 1515 /* 1516 * No choice but to zap it and thus lose the remainder 1517 * of the free objects in this slab. May cause 1518 * another error because the object count is now wrong. 1519 */ 1520 set_freepointer(s, p, NULL); 1521 ret = 0; 1522 } 1523 1524 return ret; 1525 } 1526 1527 /* 1528 * Checks if the slab state looks sane. 

/*
 * Checks if the slab state looks sane. Assumes the struct slab pointer
 * was either obtained in a way that ensures it's valid, or validated
 * by validate_slab_ptr()
 */
static int check_slab(struct kmem_cache *s, struct slab *slab)
{
	int maxobj;

	maxobj = order_objects(slab_order(slab), s->size);
	if (slab->objects > maxobj) {
		slab_err(s, slab, "objects %u > max %u",
			 slab->objects, maxobj);
		return 0;
	}
	if (slab->inuse > slab->objects) {
		slab_err(s, slab, "inuse %u > max %u",
			 slab->inuse, slab->objects);
		return 0;
	}
	if (slab->frozen) {
		slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
		return 0;
	}

	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, slab);
	return 1;
}

/*
 * Determine if a certain object in a slab is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = slab->freelist;
	while (fp && nr <= slab->objects) {
		if (fp == search)
			return true;
		if (!check_valid_pointer(s, slab, fp)) {
			if (object) {
				object_err(s, slab, object,
					   "Freechain corrupt");
				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, slab, "Freepointer corrupt");
				slab->freelist = NULL;
				slab->inuse = slab->objects;
				slab_fix(s, "Freelist cleared");
				return false;
			}
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	if (nr > slab->objects) {
		slab_err(s, slab, "Freelist cycle detected");
		slab->freelist = NULL;
		slab->inuse = slab->objects;
		slab_fix(s, "Freelist cleared");
		return false;
	}

	max_objects = order_objects(slab_order(slab), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (slab->objects != max_objects) {
		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
			 slab->objects, max_objects);
		slab->objects = max_objects;
		slab_fix(s, "Number of objects adjusted");
	}
	if (slab->inuse != slab->objects - nr) {
		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
			 slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;
		slab_fix(s, "Object count adjusted");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct slab *slab, void *object,
		  int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, slab->inuse,
			slab->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
				      s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
		     struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&slab->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&slab->slab_list);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_inc(&n->nr_slabs);
	atomic_long_add(objects, &n->total_objects);
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
	metadata_access_disable();
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					   struct slab *slab, void *object)
{
	if (!check_slab(s, slab))
		return 0;

	if (!check_valid_pointer(s, slab, object)) {
		object_err(s, slab, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline bool alloc_debug_processing(struct kmem_cache *s,
					    struct slab *slab, void *object, int orig_size)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, slab, object))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	trace(s, slab, object, 1);
	set_orig_size(s, object, orig_size);
	init_object(s, object, SLUB_RED_ACTIVE);
	return true;

bad:
	/*
	 * Let's do the best we can to avoid issues in the future. Marking all
	 * objects as used avoids touching the remaining objects.
	 */
	slab_fix(s, "Marking all objects used");
	slab->inuse = slab->objects;
	slab->freelist = NULL;
	slab->frozen = 1; /* mark consistency-failed slab as frozen */

	return false;
}

static inline int free_consistency_checks(struct kmem_cache *s,
					  struct slab *slab, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, slab, object)) {
		slab_err(s, slab, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, slab, object)) {
		object_err(s, slab, object, "Object already free");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != slab->slab_cache)) {
		if (!slab->slab_cache) {
			slab_err(NULL, slab, "No slab cache for object 0x%p",
				 object);
		} else {
			object_err(s, slab, object,
				   "page slab pointer corrupt.");
		}
		return 0;
	}
	return 1;
}

/*
 * Parse a block of slab_debug options. Blocks are delimited by ';'
 *
 * @str: start of block
 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs: return start of list of slabs, or NULL when there's no list
 * @init: assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static const char *
parse_slub_debug_flags(const char *str, slab_flags_t *flags, const char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			if (init)
				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}
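
/*
 * Illustrative sketch only, not part of the allocator: parsing the block
 * "zpu,dentry;f" per the rules above yields SLAB_RED_ZONE | SLAB_POISON |
 * SLAB_STORE_USER restricted to the "dentry" cache, with the returned
 * pointer left at the next block ("f").
 */
static __maybe_unused void parse_slab_debug_sketch(void)
{
	slab_flags_t flags;
	const char *slabs;
	const char *next;

	next = parse_slub_debug_flags("zpu,dentry;f", &flags, &slabs, false);

	WARN_ON(!(flags & SLAB_RED_ZONE) || !(flags & SLAB_POISON) ||
		!(flags & SLAB_STORE_USER));
	WARN_ON(!slabs || strncmp(slabs, "dentry", 6));
	WARN_ON(!next || *next != 'f');
}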
skipped\n", *str); 1835 } 1836 } 1837 check_slabs: 1838 if (*str == ',') 1839 *slabs = ++str; 1840 else 1841 *slabs = NULL; 1842 1843 /* Skip over the slab list */ 1844 while (*str && *str != ';') 1845 str++; 1846 1847 /* Skip any completely empty blocks */ 1848 while (*str && *str == ';') 1849 str++; 1850 1851 if (init && higher_order_disable) 1852 disable_higher_order_debug = 1; 1853 1854 if (*str) 1855 return str; 1856 else 1857 return NULL; 1858 } 1859 1860 static int __init setup_slub_debug(const char *str, const struct kernel_param *kp) 1861 { 1862 slab_flags_t flags; 1863 slab_flags_t global_flags; 1864 const char *saved_str; 1865 const char *slab_list; 1866 bool global_slub_debug_changed = false; 1867 bool slab_list_specified = false; 1868 1869 global_flags = DEBUG_DEFAULT_FLAGS; 1870 if (!str || !*str) 1871 /* 1872 * No options specified. Switch on full debugging. 1873 */ 1874 goto out; 1875 1876 saved_str = str; 1877 while (str) { 1878 str = parse_slub_debug_flags(str, &flags, &slab_list, true); 1879 1880 if (!slab_list) { 1881 global_flags = flags; 1882 global_slub_debug_changed = true; 1883 } else { 1884 slab_list_specified = true; 1885 if (flags & SLAB_STORE_USER) 1886 stack_depot_request_early_init(); 1887 } 1888 } 1889 1890 /* 1891 * For backwards compatibility, a single list of flags with list of 1892 * slabs means debugging is only changed for those slabs, so the global 1893 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending 1894 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as 1895 * long as there is no option specifying flags without a slab list. 1896 */ 1897 if (slab_list_specified) { 1898 if (!global_slub_debug_changed) 1899 global_flags = slub_debug; 1900 slub_debug_string = saved_str; 1901 } 1902 out: 1903 slub_debug = global_flags; 1904 if (slub_debug & SLAB_STORE_USER) 1905 stack_depot_request_early_init(); 1906 if (slub_debug != 0 || slub_debug_string) 1907 static_branch_enable(&slub_debug_enabled); 1908 else 1909 static_branch_disable(&slub_debug_enabled); 1910 if ((static_branch_unlikely(&init_on_alloc) || 1911 static_branch_unlikely(&init_on_free)) && 1912 (slub_debug & SLAB_POISON)) 1913 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); 1914 return 0; 1915 } 1916 1917 static const struct kernel_param_ops param_ops_slab_debug __initconst = { 1918 .flags = KERNEL_PARAM_OPS_FL_NOARG, 1919 .set = setup_slub_debug, 1920 }; 1921 __core_param_cb(slab_debug, ¶m_ops_slab_debug, NULL, 0); 1922 __core_param_cb(slub_debug, ¶m_ops_slab_debug, NULL, 0); 1923 1924 /* 1925 * kmem_cache_flags - apply debugging options to the cache 1926 * @flags: flags to set 1927 * @name: name of the cache 1928 * 1929 * Debug option(s) are applied to @flags. In addition to the debug 1930 * option(s), if a slab name (or multiple) is specified i.e. 1931 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ... 1932 * then only the select slabs will receive the debug option(s). 1933 */ 1934 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) 1935 { 1936 const char *iter; 1937 size_t len; 1938 const char *next_block; 1939 slab_flags_t block_flags; 1940 slab_flags_t slub_debug_local = slub_debug; 1941 1942 if (flags & SLAB_NO_USER_FLAGS) 1943 return flags; 1944 1945 /* 1946 * If the slab cache is for debugging (e.g. kmemleak) then 1947 * don't store user (stack trace) information by default, 1948 * but let the user enable it via the command line below. 
1949 */ 1950 if (flags & SLAB_NOLEAKTRACE) 1951 slub_debug_local &= ~SLAB_STORE_USER; 1952 1953 len = strlen(name); 1954 next_block = slub_debug_string; 1955 /* Go through all blocks of debug options, see if any matches our slab's name */ 1956 while (next_block) { 1957 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); 1958 if (!iter) 1959 continue; 1960 /* Found a block that has a slab list, search it */ 1961 while (*iter) { 1962 const char *end, *glob; 1963 size_t cmplen; 1964 1965 end = strchrnul(iter, ','); 1966 if (next_block && next_block < end) 1967 end = next_block - 1; 1968 1969 glob = strnchr(iter, end - iter, '*'); 1970 if (glob) 1971 cmplen = glob - iter; 1972 else 1973 cmplen = max_t(size_t, len, (end - iter)); 1974 1975 if (!strncmp(name, iter, cmplen)) { 1976 flags |= block_flags; 1977 return flags; 1978 } 1979 1980 if (!*end || *end == ';') 1981 break; 1982 iter = end + 1; 1983 } 1984 } 1985 1986 return flags | slub_debug_local; 1987 } 1988 #else /* !CONFIG_SLUB_DEBUG */ 1989 static inline void setup_object_debug(struct kmem_cache *s, void *object) {} 1990 static inline 1991 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} 1992 1993 static inline bool alloc_debug_processing(struct kmem_cache *s, 1994 struct slab *slab, void *object, int orig_size) { return true; } 1995 1996 static inline bool free_debug_processing(struct kmem_cache *s, 1997 struct slab *slab, void *head, void *tail, int *bulk_cnt, 1998 unsigned long addr, depot_stack_handle_t handle) { return true; } 1999 2000 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} 2001 static inline int check_object(struct kmem_cache *s, struct slab *slab, 2002 void *object, u8 val) { return 1; } 2003 static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; } 2004 static inline void set_track(struct kmem_cache *s, void *object, 2005 enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {} 2006 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 2007 struct slab *slab) {} 2008 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 2009 struct slab *slab) {} 2010 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) 2011 { 2012 return flags; 2013 } 2014 #define slub_debug 0 2015 2016 #define disable_higher_order_debug 0 2017 2018 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 2019 { return 0; } 2020 static inline void inc_slabs_node(struct kmem_cache *s, int node, 2021 int objects) {} 2022 static inline void dec_slabs_node(struct kmem_cache *s, int node, 2023 int objects) {} 2024 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 2025 void **freelist, void *nextfree) 2026 { 2027 return false; 2028 } 2029 #endif /* CONFIG_SLUB_DEBUG */ 2030 2031 /* 2032 * The allocated objcg pointers array is not accounted directly. 2033 * Moreover, it should not come from DMA buffer and is not readily 2034 * reclaimable. So those GFP bits should be masked off. 
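 *
 * E.g. an allocation with __GFP_DMA | __GFP_ACCOUNT must not pass
 * either flag on to the slabobj_ext vector allocated on its behalf.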
2035 */ 2036 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ 2037 __GFP_ACCOUNT | __GFP_NOFAIL) 2038 2039 #ifdef CONFIG_SLAB_OBJ_EXT 2040 2041 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG 2042 2043 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) 2044 { 2045 struct slabobj_ext *slab_exts; 2046 struct slab *obj_exts_slab; 2047 2048 obj_exts_slab = virt_to_slab(obj_exts); 2049 slab_exts = slab_obj_exts(obj_exts_slab); 2050 if (slab_exts) { 2051 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache, 2052 obj_exts_slab, obj_exts); 2053 2054 if (unlikely(is_codetag_empty(&slab_exts[offs].ref))) 2055 return; 2056 2057 /* codetag should be NULL here */ 2058 WARN_ON(slab_exts[offs].ref.ct); 2059 set_codetag_empty(&slab_exts[offs].ref); 2060 } 2061 } 2062 2063 static inline bool mark_failed_objexts_alloc(struct slab *slab) 2064 { 2065 return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0; 2066 } 2067 2068 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 2069 struct slabobj_ext *vec, unsigned int objects) 2070 { 2071 /* 2072 * If vector previously failed to allocate then we have live 2073 * objects with no tag reference. Mark all references in this 2074 * vector as empty to avoid warnings later on. 2075 */ 2076 if (obj_exts == OBJEXTS_ALLOC_FAIL) { 2077 unsigned int i; 2078 2079 for (i = 0; i < objects; i++) 2080 set_codetag_empty(&vec[i].ref); 2081 } 2082 } 2083 2084 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 2085 2086 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} 2087 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; } 2088 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 2089 struct slabobj_ext *vec, unsigned int objects) {} 2090 2091 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 2092 2093 static inline void init_slab_obj_exts(struct slab *slab) 2094 { 2095 slab->obj_exts = 0; 2096 } 2097 2098 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 2099 gfp_t gfp, bool new_slab) 2100 { 2101 bool allow_spin = gfpflags_allow_spinning(gfp); 2102 unsigned int objects = objs_per_slab(s, slab); 2103 unsigned long new_exts; 2104 unsigned long old_exts; 2105 struct slabobj_ext *vec; 2106 2107 gfp &= ~OBJCGS_CLEAR_MASK; 2108 /* Prevent recursive extension vector allocation */ 2109 gfp |= __GFP_NO_OBJ_EXT; 2110 2111 /* 2112 * Note that allow_spin may be false during early boot because of the 2113 * restricted GFP_BOOT_MASK. Because kmalloc_nolock() only supports 2114 * architectures with cmpxchg16b, early obj_exts will be missing for 2115 * very early allocations on architectures without it. 2116 */ 2117 if (unlikely(!allow_spin)) { 2118 size_t sz = objects * sizeof(struct slabobj_ext); 2119 2120 vec = kmalloc_nolock(sz, __GFP_ZERO | __GFP_NO_OBJ_EXT, 2121 slab_nid(slab)); 2122 } else { 2123 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, 2124 slab_nid(slab)); 2125 } 2126 if (!vec) { 2127 /* 2128 * Try to mark vectors which failed to allocate. 2129 * If this operation fails, there may be a racing process 2130 * that has already completed the allocation.
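 * In that case slab_obj_exts() already observes the racing thread's
 * vector, so we can return success instead of -ENOMEM.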
2131 */ 2132 if (!mark_failed_objexts_alloc(slab) && 2133 slab_obj_exts(slab)) 2134 return 0; 2135 2136 return -ENOMEM; 2137 } 2138 2139 new_exts = (unsigned long)vec; 2140 if (unlikely(!allow_spin)) 2141 new_exts |= OBJEXTS_NOSPIN_ALLOC; 2142 #ifdef CONFIG_MEMCG 2143 new_exts |= MEMCG_DATA_OBJEXTS; 2144 #endif 2145 retry: 2146 old_exts = READ_ONCE(slab->obj_exts); 2147 handle_failed_objexts_alloc(old_exts, vec, objects); 2148 if (new_slab) { 2149 /* 2150 * If the slab is brand new and nobody can yet access its 2151 * obj_exts, no synchronization is required and obj_exts can 2152 * be simply assigned. 2153 */ 2154 slab->obj_exts = new_exts; 2155 } else if (old_exts & ~OBJEXTS_FLAGS_MASK) { 2156 /* 2157 * If the slab is already in use, somebody can allocate and 2158 * assign slabobj_exts in parallel. In this case the existing 2159 * objcg vector should be reused. 2160 */ 2161 mark_objexts_empty(vec); 2162 if (unlikely(!allow_spin)) 2163 kfree_nolock(vec); 2164 else 2165 kfree(vec); 2166 return 0; 2167 } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { 2168 /* Retry if a racing thread changed slab->obj_exts from under us. */ 2169 goto retry; 2170 } 2171 2172 if (allow_spin) 2173 kmemleak_not_leak(vec); 2174 return 0; 2175 } 2176 2177 static inline void free_slab_obj_exts(struct slab *slab) 2178 { 2179 struct slabobj_ext *obj_exts; 2180 2181 obj_exts = slab_obj_exts(slab); 2182 if (!obj_exts) { 2183 /* 2184 * If obj_exts allocation failed, slab->obj_exts is set to 2185 * OBJEXTS_ALLOC_FAIL. In this case, we end up here and should 2186 * clear the flag. 2187 */ 2188 slab->obj_exts = 0; 2189 return; 2190 } 2191 2192 /* 2193 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its 2194 * corresponding extension will be NULL. alloc_tag_sub() will throw a 2195 * warning if slab has extensions but the extension of an object is 2196 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that 2197 * the extension for obj_exts is expected to be NULL. 
2198 */ 2199 mark_objexts_empty(obj_exts); 2200 if (unlikely(READ_ONCE(slab->obj_exts) & OBJEXTS_NOSPIN_ALLOC)) 2201 kfree_nolock(obj_exts); 2202 else 2203 kfree(obj_exts); 2204 slab->obj_exts = 0; 2205 } 2206 2207 #else /* CONFIG_SLAB_OBJ_EXT */ 2208 2209 static inline void init_slab_obj_exts(struct slab *slab) 2210 { 2211 } 2212 2213 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 2214 gfp_t gfp, bool new_slab) 2215 { 2216 return 0; 2217 } 2218 2219 static inline void free_slab_obj_exts(struct slab *slab) 2220 { 2221 } 2222 2223 #endif /* CONFIG_SLAB_OBJ_EXT */ 2224 2225 #ifdef CONFIG_MEM_ALLOC_PROFILING 2226 2227 static inline struct slabobj_ext * 2228 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 2229 { 2230 struct slab *slab; 2231 2232 slab = virt_to_slab(p); 2233 if (!slab_obj_exts(slab) && 2234 alloc_slab_obj_exts(slab, s, flags, false)) { 2235 pr_warn_once("%s, %s: Failed to create slab extension vector!\n", 2236 __func__, s->name); 2237 return NULL; 2238 } 2239 2240 return slab_obj_exts(slab) + obj_to_index(s, slab, p); 2241 } 2242 2243 /* Should be called only if mem_alloc_profiling_enabled() */ 2244 static noinline void 2245 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2246 { 2247 struct slabobj_ext *obj_exts; 2248 2249 if (!object) 2250 return; 2251 2252 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2253 return; 2254 2255 if (flags & __GFP_NO_OBJ_EXT) 2256 return; 2257 2258 obj_exts = prepare_slab_obj_exts_hook(s, flags, object); 2259 /* 2260 * Currently obj_exts is used only for allocation profiling. 2261 * If other users appear then mem_alloc_profiling_enabled() 2262 * check should be added before alloc_tag_add(). 2263 */ 2264 if (likely(obj_exts)) 2265 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); 2266 else 2267 alloc_tag_set_inaccurate(current->alloc_tag); 2268 } 2269 2270 static inline void 2271 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2272 { 2273 if (mem_alloc_profiling_enabled()) 2274 __alloc_tagging_slab_alloc_hook(s, object, flags); 2275 } 2276 2277 /* Should be called only if mem_alloc_profiling_enabled() */ 2278 static noinline void 2279 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2280 int objects) 2281 { 2282 struct slabobj_ext *obj_exts; 2283 int i; 2284 2285 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. 
*/ 2286 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2287 return; 2288 2289 obj_exts = slab_obj_exts(slab); 2290 if (!obj_exts) 2291 return; 2292 2293 for (i = 0; i < objects; i++) { 2294 unsigned int off = obj_to_index(s, slab, p[i]); 2295 2296 alloc_tag_sub(&obj_exts[off].ref, s->size); 2297 } 2298 } 2299 2300 static inline void 2301 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2302 int objects) 2303 { 2304 if (mem_alloc_profiling_enabled()) 2305 __alloc_tagging_slab_free_hook(s, slab, p, objects); 2306 } 2307 2308 #else /* CONFIG_MEM_ALLOC_PROFILING */ 2309 2310 static inline void 2311 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2312 { 2313 } 2314 2315 static inline void 2316 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2317 int objects) 2318 { 2319 } 2320 2321 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 2322 2323 2324 #ifdef CONFIG_MEMCG 2325 2326 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object); 2327 2328 static __fastpath_inline 2329 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 2330 gfp_t flags, size_t size, void **p) 2331 { 2332 if (likely(!memcg_kmem_online())) 2333 return true; 2334 2335 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) 2336 return true; 2337 2338 if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p))) 2339 return true; 2340 2341 if (likely(size == 1)) { 2342 memcg_alloc_abort_single(s, *p); 2343 *p = NULL; 2344 } else { 2345 kmem_cache_free_bulk(s, size, p); 2346 } 2347 2348 return false; 2349 } 2350 2351 static __fastpath_inline 2352 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2353 int objects) 2354 { 2355 struct slabobj_ext *obj_exts; 2356 2357 if (!memcg_kmem_online()) 2358 return; 2359 2360 obj_exts = slab_obj_exts(slab); 2361 if (likely(!obj_exts)) 2362 return; 2363 2364 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); 2365 } 2366 2367 static __fastpath_inline 2368 bool memcg_slab_post_charge(void *p, gfp_t flags) 2369 { 2370 struct slabobj_ext *slab_exts; 2371 struct kmem_cache *s; 2372 struct page *page; 2373 struct slab *slab; 2374 unsigned long off; 2375 2376 page = virt_to_page(p); 2377 if (PageLargeKmalloc(page)) { 2378 unsigned int order; 2379 int size; 2380 2381 if (PageMemcgKmem(page)) 2382 return true; 2383 2384 order = large_kmalloc_order(page); 2385 if (__memcg_kmem_charge_page(page, flags, order)) 2386 return false; 2387 2388 /* 2389 * This page has already been accounted in the global stats but 2390 * not in the memcg stats. So, subtract from the global and use 2391 * the interface which adds to both global and memcg stats. 2392 */ 2393 size = PAGE_SIZE << order; 2394 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, -size); 2395 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, size); 2396 return true; 2397 } 2398 2399 slab = page_slab(page); 2400 s = slab->slab_cache; 2401 2402 /* 2403 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency 2404 * of slab_obj_exts being allocated from the same slab and thus the slab 2405 * becoming effectively unfreeable. 2406 */ 2407 if (is_kmalloc_normal(s)) 2408 return true; 2409 2410 /* Ignore already charged objects. 
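 * (a non-NULL objcg in the object's slabobj_ext means it was already
 * charged)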
*/ 2411 slab_exts = slab_obj_exts(slab); 2412 if (slab_exts) { 2413 off = obj_to_index(s, slab, p); 2414 if (unlikely(slab_exts[off].objcg)) 2415 return true; 2416 } 2417 2418 return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p); 2419 } 2420 2421 #else /* CONFIG_MEMCG */ 2422 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s, 2423 struct list_lru *lru, 2424 gfp_t flags, size_t size, 2425 void **p) 2426 { 2427 return true; 2428 } 2429 2430 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 2431 void **p, int objects) 2432 { 2433 } 2434 2435 static inline bool memcg_slab_post_charge(void *p, gfp_t flags) 2436 { 2437 return true; 2438 } 2439 #endif /* CONFIG_MEMCG */ 2440 2441 #ifdef CONFIG_SLUB_RCU_DEBUG 2442 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head); 2443 2444 struct rcu_delayed_free { 2445 struct rcu_head head; 2446 void *object; 2447 }; 2448 #endif 2449 2450 /* 2451 * Hooks for other subsystems that check memory allocations. In a typical 2452 * production configuration these hooks all should produce no code at all. 2453 * 2454 * Returns true if freeing of the object can proceed, false if its reuse 2455 * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned 2456 * to KFENCE. 2457 */ 2458 static __always_inline 2459 bool slab_free_hook(struct kmem_cache *s, void *x, bool init, 2460 bool after_rcu_delay) 2461 { 2462 /* Are the object contents still accessible? */ 2463 bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay; 2464 2465 kmemleak_free_recursive(x, s->flags); 2466 kmsan_slab_free(s, x); 2467 2468 debug_check_no_locks_freed(x, s->object_size); 2469 2470 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 2471 debug_check_no_obj_freed(x, s->object_size); 2472 2473 /* Use KCSAN to help debug racy use-after-free. */ 2474 if (!still_accessible) 2475 __kcsan_check_access(x, s->object_size, 2476 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 2477 2478 if (kfence_free(x)) 2479 return false; 2480 2481 /* 2482 * Give KASAN a chance to notice an invalid free operation before we 2483 * modify the object. 2484 */ 2485 if (kasan_slab_pre_free(s, x)) 2486 return false; 2487 2488 #ifdef CONFIG_SLUB_RCU_DEBUG 2489 if (still_accessible) { 2490 struct rcu_delayed_free *delayed_free; 2491 2492 delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT); 2493 if (delayed_free) { 2494 /* 2495 * Let KASAN track our call stack as a "related work 2496 * creation", just like if the object had been freed 2497 * normally via kfree_rcu(). 2498 * We have to do this manually because the rcu_head is 2499 * not located inside the object. 2500 */ 2501 kasan_record_aux_stack(x); 2502 2503 delayed_free->object = x; 2504 call_rcu(&delayed_free->head, slab_free_after_rcu_debug); 2505 return false; 2506 } 2507 } 2508 #endif /* CONFIG_SLUB_RCU_DEBUG */ 2509 2510 /* 2511 * As memory initialization might be integrated into KASAN, 2512 * kasan_slab_free and initialization memset's must be 2513 * kept together to avoid discrepancies in behavior. 2514 * 2515 * The initialization memset's clear the object and the metadata, 2516 * but don't touch the SLAB redzone. 2517 * 2518 * The object's freepointer is also avoided if stored outside the 2519 * object. 
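 *
 * (hence the second memset() below clears only the range from
 * get_info_end() up to s->size minus the trailing red zone)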
2520 */ 2521 if (unlikely(init)) { 2522 int rsize; 2523 unsigned int inuse, orig_size; 2524 2525 inuse = get_info_end(s); 2526 orig_size = get_orig_size(s, x); 2527 if (!kasan_has_integrated_init()) 2528 memset(kasan_reset_tag(x), 0, orig_size); 2529 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 2530 memset((char *)kasan_reset_tag(x) + inuse, 0, 2531 s->size - inuse - rsize); 2532 /* 2533 * Restore orig_size, otherwise the kmalloc redzone would be 2534 * reported as overwritten 2535 */ 2536 set_orig_size(s, x, orig_size); 2537 2538 } 2539 /* KASAN might put x into memory quarantine, delaying its reuse. */ 2540 return !kasan_slab_free(s, x, init, still_accessible, false); 2541 } 2542 2543 static __fastpath_inline 2544 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail, 2545 int *cnt) 2546 { 2547 2548 void *object; 2549 void *next = *head; 2550 void *old_tail = *tail; 2551 bool init; 2552 2553 if (is_kfence_address(next)) { 2554 slab_free_hook(s, next, false, false); 2555 return false; 2556 } 2557 2558 /* Head and tail of the reconstructed freelist */ 2559 *head = NULL; 2560 *tail = NULL; 2561 2562 init = slab_want_init_on_free(s); 2563 2564 do { 2565 object = next; 2566 next = get_freepointer(s, object); 2567 2568 /* If object's reuse doesn't have to be delayed */ 2569 if (likely(slab_free_hook(s, object, init, false))) { 2570 /* Move object to the new freelist */ 2571 set_freepointer(s, object, *head); 2572 *head = object; 2573 if (!*tail) 2574 *tail = object; 2575 } else { 2576 /* 2577 * Adjust the reconstructed freelist depth 2578 * accordingly if object's reuse is delayed. 2579 */ 2580 --(*cnt); 2581 } 2582 } while (object != old_tail); 2583 2584 return *head != NULL; 2585 } 2586 2587 static void *setup_object(struct kmem_cache *s, void *object) 2588 { 2589 setup_object_debug(s, object); 2590 object = kasan_init_slab_obj(s, object); 2591 if (unlikely(s->ctor)) { 2592 kasan_unpoison_new_object(s, object); 2593 s->ctor(object); 2594 kasan_poison_new_object(s, object); 2595 } 2596 return object; 2597 } 2598 2599 static struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp) 2600 { 2601 struct slab_sheaf *sheaf; 2602 size_t sheaf_size; 2603 2604 if (gfp & __GFP_NO_OBJ_EXT) 2605 return NULL; 2606 2607 gfp &= ~OBJCGS_CLEAR_MASK; 2608 2609 /* 2610 * Prevent recursion to the same cache, or a deep stack of kmallocs of 2611 * varying sizes (sheaf capacity might differ for each kmalloc size 2612 * bucket) 2613 */ 2614 if (s->flags & SLAB_KMALLOC) 2615 gfp |= __GFP_NO_OBJ_EXT; 2616 2617 sheaf_size = struct_size(sheaf, objects, s->sheaf_capacity); 2618 sheaf = kzalloc(sheaf_size, gfp); 2619 2620 if (unlikely(!sheaf)) 2621 return NULL; 2622 2623 sheaf->cache = s; 2624 2625 stat(s, SHEAF_ALLOC); 2626 2627 return sheaf; 2628 } 2629 2630 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf) 2631 { 2632 kfree(sheaf); 2633 2634 stat(s, SHEAF_FREE); 2635 } 2636 2637 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 2638 size_t size, void **p); 2639 2640 2641 static int refill_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf, 2642 gfp_t gfp) 2643 { 2644 int to_fill = s->sheaf_capacity - sheaf->size; 2645 int filled; 2646 2647 if (!to_fill) 2648 return 0; 2649 2650 filled = __kmem_cache_alloc_bulk(s, gfp, to_fill, 2651 &sheaf->objects[sheaf->size]); 2652 2653 sheaf->size += filled; 2654 2655 stat_add(s, SHEAF_REFILL, filled); 2656 2657 if (filled < to_fill) 2658 return -ENOMEM; 2659 2660 return 0; 2661 } 2662 2663 2664
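/*
 * Allocate a new sheaf and refill it up to the cache's sheaf_capacity.
 * __GFP_NOMEMALLOC makes sure this opportunistic refill cannot dip into
 * memory reserves. Returns NULL if the sheaf could not be allocated or
 * completely refilled.
 */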
static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp) 2665 { 2666 struct slab_sheaf *sheaf = alloc_empty_sheaf(s, gfp); 2667 2668 if (!sheaf) 2669 return NULL; 2670 2671 if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) { 2672 free_empty_sheaf(s, sheaf); 2673 return NULL; 2674 } 2675 2676 return sheaf; 2677 } 2678 2679 /* 2680 * Maximum number of objects freed during a single flush of main pcs sheaf. 2681 * Translates directly to an on-stack array size. 2682 */ 2683 #define PCS_BATCH_MAX 32U 2684 2685 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p); 2686 2687 /* 2688 * Free all objects from the main sheaf. In order to perform 2689 * __kmem_cache_free_bulk() outside of cpu_sheaves->lock, work in batches where 2690 * object pointers are moved to an on-stack array under the lock. To bound the 2691 * stack usage, limit each batch to PCS_BATCH_MAX. 2692 * 2693 * returns true if at least partially flushed 2694 */ 2695 static bool sheaf_flush_main(struct kmem_cache *s) 2696 { 2697 struct slub_percpu_sheaves *pcs; 2698 unsigned int batch, remaining; 2699 void *objects[PCS_BATCH_MAX]; 2700 struct slab_sheaf *sheaf; 2701 bool ret = false; 2702 2703 next_batch: 2704 if (!local_trylock(&s->cpu_sheaves->lock)) 2705 return ret; 2706 2707 pcs = this_cpu_ptr(s->cpu_sheaves); 2708 sheaf = pcs->main; 2709 2710 batch = min(PCS_BATCH_MAX, sheaf->size); 2711 2712 sheaf->size -= batch; 2713 memcpy(objects, sheaf->objects + sheaf->size, batch * sizeof(void *)); 2714 2715 remaining = sheaf->size; 2716 2717 local_unlock(&s->cpu_sheaves->lock); 2718 2719 __kmem_cache_free_bulk(s, batch, &objects[0]); 2720 2721 stat_add(s, SHEAF_FLUSH, batch); 2722 2723 ret = true; 2724 2725 if (remaining) 2726 goto next_batch; 2727 2728 return ret; 2729 } 2730 2731 /* 2732 * Free all objects from a sheaf that's unused, i.e. not linked to any 2733 * cpu_sheaves, so we need neither locking nor batching. The locking is also not 2734 * necessary when flushing cpu's sheaves (both spare and main) during cpu 2735 * hotremove as the cpu is not executing anymore.
2736 */ 2737 static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf) 2738 { 2739 if (!sheaf->size) 2740 return; 2741 2742 stat_add(s, SHEAF_FLUSH, sheaf->size); 2743 2744 __kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]); 2745 2746 sheaf->size = 0; 2747 } 2748 2749 static bool __rcu_free_sheaf_prepare(struct kmem_cache *s, 2750 struct slab_sheaf *sheaf) 2751 { 2752 bool init = slab_want_init_on_free(s); 2753 void **p = &sheaf->objects[0]; 2754 unsigned int i = 0; 2755 bool pfmemalloc = false; 2756 2757 while (i < sheaf->size) { 2758 struct slab *slab = virt_to_slab(p[i]); 2759 2760 memcg_slab_free_hook(s, slab, p + i, 1); 2761 alloc_tagging_slab_free_hook(s, slab, p + i, 1); 2762 2763 if (unlikely(!slab_free_hook(s, p[i], init, true))) { 2764 p[i] = p[--sheaf->size]; 2765 continue; 2766 } 2767 2768 if (slab_test_pfmemalloc(slab)) 2769 pfmemalloc = true; 2770 2771 i++; 2772 } 2773 2774 return pfmemalloc; 2775 } 2776 2777 static void rcu_free_sheaf_nobarn(struct rcu_head *head) 2778 { 2779 struct slab_sheaf *sheaf; 2780 struct kmem_cache *s; 2781 2782 sheaf = container_of(head, struct slab_sheaf, rcu_head); 2783 s = sheaf->cache; 2784 2785 __rcu_free_sheaf_prepare(s, sheaf); 2786 2787 sheaf_flush_unused(s, sheaf); 2788 2789 free_empty_sheaf(s, sheaf); 2790 } 2791 2792 /* 2793 * Caller needs to make sure migration is disabled in order to fully flush 2794 * single cpu's sheaves 2795 * 2796 * must not be called from an irq 2797 * 2798 * flushing operations are rare so let's keep it simple and flush to slabs 2799 * directly, skipping the barn 2800 */ 2801 static void pcs_flush_all(struct kmem_cache *s) 2802 { 2803 struct slub_percpu_sheaves *pcs; 2804 struct slab_sheaf *spare, *rcu_free; 2805 2806 local_lock(&s->cpu_sheaves->lock); 2807 pcs = this_cpu_ptr(s->cpu_sheaves); 2808 2809 spare = pcs->spare; 2810 pcs->spare = NULL; 2811 2812 rcu_free = pcs->rcu_free; 2813 pcs->rcu_free = NULL; 2814 2815 local_unlock(&s->cpu_sheaves->lock); 2816 2817 if (spare) { 2818 sheaf_flush_unused(s, spare); 2819 free_empty_sheaf(s, spare); 2820 } 2821 2822 if (rcu_free) 2823 call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn); 2824 2825 sheaf_flush_main(s); 2826 } 2827 2828 static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu) 2829 { 2830 struct slub_percpu_sheaves *pcs; 2831 2832 pcs = per_cpu_ptr(s->cpu_sheaves, cpu); 2833 2834 /* The cpu is not executing anymore so we don't need pcs->lock */ 2835 sheaf_flush_unused(s, pcs->main); 2836 if (pcs->spare) { 2837 sheaf_flush_unused(s, pcs->spare); 2838 free_empty_sheaf(s, pcs->spare); 2839 pcs->spare = NULL; 2840 } 2841 2842 if (pcs->rcu_free) { 2843 call_rcu(&pcs->rcu_free->rcu_head, rcu_free_sheaf_nobarn); 2844 pcs->rcu_free = NULL; 2845 } 2846 } 2847 2848 static void pcs_destroy(struct kmem_cache *s) 2849 { 2850 int cpu; 2851 2852 for_each_possible_cpu(cpu) { 2853 struct slub_percpu_sheaves *pcs; 2854 2855 pcs = per_cpu_ptr(s->cpu_sheaves, cpu); 2856 2857 /* can happen when unwinding failed create */ 2858 if (!pcs->main) 2859 continue; 2860 2861 /* 2862 * We have already passed __kmem_cache_shutdown() so everything 2863 * was flushed and there should be no objects allocated from 2864 * slabs, otherwise kmem_cache_destroy() would have aborted. 2865 * Therefore something would have to be really wrong if the 2866 * warnings here trigger, and we should rather leave objects and 2867 * sheaves to leak in that case. 
2868 */ 2869 2870 WARN_ON(pcs->spare); 2871 WARN_ON(pcs->rcu_free); 2872 2873 if (!WARN_ON(pcs->main->size)) { 2874 free_empty_sheaf(s, pcs->main); 2875 pcs->main = NULL; 2876 } 2877 } 2878 2879 free_percpu(s->cpu_sheaves); 2880 s->cpu_sheaves = NULL; 2881 } 2882 2883 static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn) 2884 { 2885 struct slab_sheaf *empty = NULL; 2886 unsigned long flags; 2887 2888 if (!data_race(barn->nr_empty)) 2889 return NULL; 2890 2891 spin_lock_irqsave(&barn->lock, flags); 2892 2893 if (likely(barn->nr_empty)) { 2894 empty = list_first_entry(&barn->sheaves_empty, 2895 struct slab_sheaf, barn_list); 2896 list_del(&empty->barn_list); 2897 barn->nr_empty--; 2898 } 2899 2900 spin_unlock_irqrestore(&barn->lock, flags); 2901 2902 return empty; 2903 } 2904 2905 /* 2906 * The following two functions are used mainly in cases where we have to undo an 2907 * intended action due to a race or cpu migration. Thus they do not check the 2908 * empty or full sheaf limits for simplicity. 2909 */ 2910 2911 static void barn_put_empty_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf) 2912 { 2913 unsigned long flags; 2914 2915 spin_lock_irqsave(&barn->lock, flags); 2916 2917 list_add(&sheaf->barn_list, &barn->sheaves_empty); 2918 barn->nr_empty++; 2919 2920 spin_unlock_irqrestore(&barn->lock, flags); 2921 } 2922 2923 static void barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf) 2924 { 2925 unsigned long flags; 2926 2927 spin_lock_irqsave(&barn->lock, flags); 2928 2929 list_add(&sheaf->barn_list, &barn->sheaves_full); 2930 barn->nr_full++; 2931 2932 spin_unlock_irqrestore(&barn->lock, flags); 2933 } 2934 2935 static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn) 2936 { 2937 struct slab_sheaf *sheaf = NULL; 2938 unsigned long flags; 2939 2940 if (!data_race(barn->nr_full) && !data_race(barn->nr_empty)) 2941 return NULL; 2942 2943 spin_lock_irqsave(&barn->lock, flags); 2944 2945 if (barn->nr_full) { 2946 sheaf = list_first_entry(&barn->sheaves_full, struct slab_sheaf, 2947 barn_list); 2948 list_del(&sheaf->barn_list); 2949 barn->nr_full--; 2950 } else if (barn->nr_empty) { 2951 sheaf = list_first_entry(&barn->sheaves_empty, 2952 struct slab_sheaf, barn_list); 2953 list_del(&sheaf->barn_list); 2954 barn->nr_empty--; 2955 } 2956 2957 spin_unlock_irqrestore(&barn->lock, flags); 2958 2959 return sheaf; 2960 } 2961 2962 /* 2963 * If a full sheaf is available, return it and put the supplied empty one to 2964 * barn. We ignore the limit on empty sheaves as the number of sheaves doesn't 2965 * change. 2966 */ 2967 static struct slab_sheaf * 2968 barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty) 2969 { 2970 struct slab_sheaf *full = NULL; 2971 unsigned long flags; 2972 2973 if (!data_race(barn->nr_full)) 2974 return NULL; 2975 2976 spin_lock_irqsave(&barn->lock, flags); 2977 2978 if (likely(barn->nr_full)) { 2979 full = list_first_entry(&barn->sheaves_full, struct slab_sheaf, 2980 barn_list); 2981 list_del(&full->barn_list); 2982 list_add(&empty->barn_list, &barn->sheaves_empty); 2983 barn->nr_full--; 2984 barn->nr_empty++; 2985 } 2986 2987 spin_unlock_irqrestore(&barn->lock, flags); 2988 2989 return full; 2990 } 2991 2992 /* 2993 * If an empty sheaf is available, return it and put the supplied full one to 2994 * barn. But if there are too many full sheaves, reject this with -E2BIG. 
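 * The swap itself keeps the total number of sheaves in the barn
 * constant; the check only bounds nr_full, and with it the number of
 * free objects the barn may cache.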
2995 */ 2996 static struct slab_sheaf * 2997 barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full) 2998 { 2999 struct slab_sheaf *empty; 3000 unsigned long flags; 3001 3002 /* we don't repeat this check under barn->lock as it's not critical */ 3003 if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES) 3004 return ERR_PTR(-E2BIG); 3005 if (!data_race(barn->nr_empty)) 3006 return ERR_PTR(-ENOMEM); 3007 3008 spin_lock_irqsave(&barn->lock, flags); 3009 3010 if (likely(barn->nr_empty)) { 3011 empty = list_first_entry(&barn->sheaves_empty, struct slab_sheaf, 3012 barn_list); 3013 list_del(&empty->barn_list); 3014 list_add(&full->barn_list, &barn->sheaves_full); 3015 barn->nr_empty--; 3016 barn->nr_full++; 3017 } else { 3018 empty = ERR_PTR(-ENOMEM); 3019 } 3020 3021 spin_unlock_irqrestore(&barn->lock, flags); 3022 3023 return empty; 3024 } 3025 3026 static void barn_init(struct node_barn *barn) 3027 { 3028 spin_lock_init(&barn->lock); 3029 INIT_LIST_HEAD(&barn->sheaves_full); 3030 INIT_LIST_HEAD(&barn->sheaves_empty); 3031 barn->nr_full = 0; 3032 barn->nr_empty = 0; 3033 } 3034 3035 static void barn_shrink(struct kmem_cache *s, struct node_barn *barn) 3036 { 3037 LIST_HEAD(empty_list); 3038 LIST_HEAD(full_list); 3039 struct slab_sheaf *sheaf, *sheaf2; 3040 unsigned long flags; 3041 3042 spin_lock_irqsave(&barn->lock, flags); 3043 3044 list_splice_init(&barn->sheaves_full, &full_list); 3045 barn->nr_full = 0; 3046 list_splice_init(&barn->sheaves_empty, &empty_list); 3047 barn->nr_empty = 0; 3048 3049 spin_unlock_irqrestore(&barn->lock, flags); 3050 3051 list_for_each_entry_safe(sheaf, sheaf2, &full_list, barn_list) { 3052 sheaf_flush_unused(s, sheaf); 3053 free_empty_sheaf(s, sheaf); 3054 } 3055 3056 list_for_each_entry_safe(sheaf, sheaf2, &empty_list, barn_list) 3057 free_empty_sheaf(s, sheaf); 3058 } 3059 3060 /* 3061 * Slab allocation and freeing 3062 */ 3063 static inline struct slab *alloc_slab_page(gfp_t flags, int node, 3064 struct kmem_cache_order_objects oo, 3065 bool allow_spin) 3066 { 3067 struct page *page; 3068 struct slab *slab; 3069 unsigned int order = oo_order(oo); 3070 3071 if (unlikely(!allow_spin)) 3072 page = alloc_frozen_pages_nolock(0/* __GFP_COMP is implied */, 3073 node, order); 3074 else if (node == NUMA_NO_NODE) 3075 page = alloc_frozen_pages(flags, order); 3076 else 3077 page = __alloc_frozen_pages(flags, order, node, NULL); 3078 3079 if (!page) 3080 return NULL; 3081 3082 __SetPageSlab(page); 3083 slab = page_slab(page); 3084 if (page_is_pfmemalloc(page)) 3085 slab_set_pfmemalloc(slab); 3086 3087 return slab; 3088 } 3089 3090 #ifdef CONFIG_SLAB_FREELIST_RANDOM 3091 /* Pre-initialize the random sequence cache */ 3092 static int init_cache_random_seq(struct kmem_cache *s) 3093 { 3094 unsigned int count = oo_objects(s->oo); 3095 int err; 3096 3097 /* Bailout if already initialised */ 3098 if (s->random_seq) 3099 return 0; 3100 3101 err = cache_random_seq_create(s, count, GFP_KERNEL); 3102 if (err) { 3103 pr_err("SLUB: Unable to initialize free list for %s\n", 3104 s->name); 3105 return err; 3106 } 3107 3108 /* Transform to an offset on the set of pages */ 3109 if (s->random_seq) { 3110 unsigned int i; 3111 3112 for (i = 0; i < count; i++) 3113 s->random_seq[i] *= s->size; 3114 } 3115 return 0; 3116 } 3117 3118 /* Initialize each random sequence freelist per cache */ 3119 static void __init init_freelist_randomization(void) 3120 { 3121 struct kmem_cache *s; 3122 3123 mutex_lock(&slab_mutex); 3124 3125 list_for_each_entry(s, &slab_caches, list) 3126 
init_cache_random_seq(s); 3127 3128 mutex_unlock(&slab_mutex); 3129 } 3130 3131 /* Get the next entry on the pre-computed freelist randomized */ 3132 static void *next_freelist_entry(struct kmem_cache *s, 3133 unsigned long *pos, void *start, 3134 unsigned long page_limit, 3135 unsigned long freelist_count) 3136 { 3137 unsigned int idx; 3138 3139 /* 3140 * If the target page allocation failed, the number of objects on the 3141 * page might be smaller than the usual size defined by the cache. 3142 */ 3143 do { 3144 idx = s->random_seq[*pos]; 3145 *pos += 1; 3146 if (*pos >= freelist_count) 3147 *pos = 0; 3148 } while (unlikely(idx >= page_limit)); 3149 3150 return (char *)start + idx; 3151 } 3152 3153 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 3154 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 3155 { 3156 void *start; 3157 void *cur; 3158 void *next; 3159 unsigned long idx, pos, page_limit, freelist_count; 3160 3161 if (slab->objects < 2 || !s->random_seq) 3162 return false; 3163 3164 freelist_count = oo_objects(s->oo); 3165 pos = get_random_u32_below(freelist_count); 3166 3167 page_limit = slab->objects * s->size; 3168 start = fixup_red_left(s, slab_address(slab)); 3169 3170 /* First entry is used as the base of the freelist */ 3171 cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count); 3172 cur = setup_object(s, cur); 3173 slab->freelist = cur; 3174 3175 for (idx = 1; idx < slab->objects; idx++) { 3176 next = next_freelist_entry(s, &pos, start, page_limit, 3177 freelist_count); 3178 next = setup_object(s, next); 3179 set_freepointer(s, cur, next); 3180 cur = next; 3181 } 3182 set_freepointer(s, cur, NULL); 3183 3184 return true; 3185 } 3186 #else 3187 static inline int init_cache_random_seq(struct kmem_cache *s) 3188 { 3189 return 0; 3190 } 3191 static inline void init_freelist_randomization(void) { } 3192 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 3193 { 3194 return false; 3195 } 3196 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 3197 3198 static __always_inline void account_slab(struct slab *slab, int order, 3199 struct kmem_cache *s, gfp_t gfp) 3200 { 3201 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) 3202 alloc_slab_obj_exts(slab, s, gfp, true); 3203 3204 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 3205 PAGE_SIZE << order); 3206 } 3207 3208 static __always_inline void unaccount_slab(struct slab *slab, int order, 3209 struct kmem_cache *s) 3210 { 3211 /* 3212 * The slab object extensions should now be freed regardless of 3213 * whether mem_alloc_profiling_enabled() or not because profiling 3214 * might have been disabled after slab->obj_exts got allocated. 3215 */ 3216 free_slab_obj_exts(slab); 3217 3218 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 3219 -(PAGE_SIZE << order)); 3220 } 3221 3222 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 3223 { 3224 bool allow_spin = gfpflags_allow_spinning(flags); 3225 struct slab *slab; 3226 struct kmem_cache_order_objects oo = s->oo; 3227 gfp_t alloc_gfp; 3228 void *start, *p, *next; 3229 int idx; 3230 bool shuffle; 3231 3232 flags &= gfp_allowed_mask; 3233 3234 flags |= s->allocflags; 3235 3236 /* 3237 * Let the initial higher-order allocation fail under memory pressure 3238 * so we fall-back to the minimum order allocation. 
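 * E.g. the oo-order attempt below proceeds with __GFP_NORETRY and
 * without direct reclaim, so under pressure it fails fast and we retry
 * with the smaller s->min order instead.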
3239 */ 3240 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 3241 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 3242 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; 3243 3244 /* 3245 * __GFP_RECLAIM could be cleared on the first allocation attempt, 3246 * so pass allow_spin flag directly. 3247 */ 3248 slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin); 3249 if (unlikely(!slab)) { 3250 oo = s->min; 3251 alloc_gfp = flags; 3252 /* 3253 * Allocation may have failed due to fragmentation. 3254 * Try a lower order alloc if possible 3255 */ 3256 slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin); 3257 if (unlikely(!slab)) 3258 return NULL; 3259 stat(s, ORDER_FALLBACK); 3260 } 3261 3262 slab->objects = oo_objects(oo); 3263 slab->inuse = 0; 3264 slab->frozen = 0; 3265 init_slab_obj_exts(slab); 3266 3267 account_slab(slab, oo_order(oo), s, flags); 3268 3269 slab->slab_cache = s; 3270 3271 kasan_poison_slab(slab); 3272 3273 start = slab_address(slab); 3274 3275 setup_slab_debug(s, slab, start); 3276 3277 shuffle = shuffle_freelist(s, slab); 3278 3279 if (!shuffle) { 3280 start = fixup_red_left(s, start); 3281 start = setup_object(s, start); 3282 slab->freelist = start; 3283 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 3284 next = p + s->size; 3285 next = setup_object(s, next); 3286 set_freepointer(s, p, next); 3287 p = next; 3288 } 3289 set_freepointer(s, p, NULL); 3290 } 3291 3292 return slab; 3293 } 3294 3295 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 3296 { 3297 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 3298 flags = kmalloc_fix_flags(flags); 3299 3300 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 3301 3302 return allocate_slab(s, 3303 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 3304 } 3305 3306 static void __free_slab(struct kmem_cache *s, struct slab *slab) 3307 { 3308 struct page *page = slab_page(slab); 3309 int order = compound_order(page); 3310 int pages = 1 << order; 3311 3312 __slab_clear_pfmemalloc(slab); 3313 page->mapping = NULL; 3314 __ClearPageSlab(page); 3315 mm_account_reclaimed_pages(pages); 3316 unaccount_slab(slab, order, s); 3317 free_frozen_pages(page, order); 3318 } 3319 3320 static void rcu_free_slab(struct rcu_head *h) 3321 { 3322 struct slab *slab = container_of(h, struct slab, rcu_head); 3323 3324 __free_slab(slab->slab_cache, slab); 3325 } 3326 3327 static void free_slab(struct kmem_cache *s, struct slab *slab) 3328 { 3329 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 3330 void *p; 3331 3332 slab_pad_check(s, slab); 3333 for_each_object(p, s, slab_address(slab), slab->objects) 3334 check_object(s, slab, p, SLUB_RED_INACTIVE); 3335 } 3336 3337 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) 3338 call_rcu(&slab->rcu_head, rcu_free_slab); 3339 else 3340 __free_slab(s, slab); 3341 } 3342 3343 static void discard_slab(struct kmem_cache *s, struct slab *slab) 3344 { 3345 dec_slabs_node(s, slab_nid(slab), slab->objects); 3346 free_slab(s, slab); 3347 } 3348 3349 static inline bool slab_test_node_partial(const struct slab *slab) 3350 { 3351 return test_bit(SL_partial, &slab->flags.f); 3352 } 3353 3354 static inline void slab_set_node_partial(struct slab *slab) 3355 { 3356 set_bit(SL_partial, &slab->flags.f); 3357 } 3358 3359 static inline void slab_clear_node_partial(struct slab *slab) 3360 { 3361 clear_bit(SL_partial, &slab->flags.f); 3362 } 3363 3364 /* 3365 * Management of partially allocated slabs. 
3366 */ 3367 static inline void 3368 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 3369 { 3370 n->nr_partial++; 3371 if (tail == DEACTIVATE_TO_TAIL) 3372 list_add_tail(&slab->slab_list, &n->partial); 3373 else 3374 list_add(&slab->slab_list, &n->partial); 3375 slab_set_node_partial(slab); 3376 } 3377 3378 static inline void add_partial(struct kmem_cache_node *n, 3379 struct slab *slab, int tail) 3380 { 3381 lockdep_assert_held(&n->list_lock); 3382 __add_partial(n, slab, tail); 3383 } 3384 3385 static inline void remove_partial(struct kmem_cache_node *n, 3386 struct slab *slab) 3387 { 3388 lockdep_assert_held(&n->list_lock); 3389 list_del(&slab->slab_list); 3390 slab_clear_node_partial(slab); 3391 n->nr_partial--; 3392 } 3393 3394 /* 3395 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a 3396 * slab from the n->partial list. Remove only a single object from the slab, do 3397 * the alloc_debug_processing() checks and leave the slab on the list, or move 3398 * it to full list if it was the last free object. 3399 */ 3400 static void *alloc_single_from_partial(struct kmem_cache *s, 3401 struct kmem_cache_node *n, struct slab *slab, int orig_size) 3402 { 3403 void *object; 3404 3405 lockdep_assert_held(&n->list_lock); 3406 3407 #ifdef CONFIG_SLUB_DEBUG 3408 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3409 if (!validate_slab_ptr(slab)) { 3410 slab_err(s, slab, "Not a valid slab page"); 3411 return NULL; 3412 } 3413 } 3414 #endif 3415 3416 object = slab->freelist; 3417 slab->freelist = get_freepointer(s, object); 3418 slab->inuse++; 3419 3420 if (!alloc_debug_processing(s, slab, object, orig_size)) { 3421 remove_partial(n, slab); 3422 return NULL; 3423 } 3424 3425 if (slab->inuse == slab->objects) { 3426 remove_partial(n, slab); 3427 add_full(s, n, slab); 3428 } 3429 3430 return object; 3431 } 3432 3433 static void defer_deactivate_slab(struct slab *slab, void *flush_freelist); 3434 3435 /* 3436 * Called only for kmem_cache_debug() caches to allocate from a freshly 3437 * allocated slab. Allocate a single object instead of whole freelist 3438 * and put the slab to the partial (or full) list. 3439 */ 3440 static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab, 3441 int orig_size, gfp_t gfpflags) 3442 { 3443 bool allow_spin = gfpflags_allow_spinning(gfpflags); 3444 int nid = slab_nid(slab); 3445 struct kmem_cache_node *n = get_node(s, nid); 3446 unsigned long flags; 3447 void *object; 3448 3449 if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) { 3450 /* Unlucky, discard newly allocated slab */ 3451 defer_deactivate_slab(slab, NULL); 3452 return NULL; 3453 } 3454 3455 object = slab->freelist; 3456 slab->freelist = get_freepointer(s, object); 3457 slab->inuse = 1; 3458 3459 if (!alloc_debug_processing(s, slab, object, orig_size)) { 3460 /* 3461 * It's not really expected that this would fail on a 3462 * freshly allocated slab, but a concurrent memory 3463 * corruption in theory could cause that. 3464 * Leak memory of allocated slab. 
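 * The slab is left off all lists and has not yet been counted via
 * inc_slabs_node(), so only this one slab's memory is leaked.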
3465 */ 3466 if (!allow_spin) 3467 spin_unlock_irqrestore(&n->list_lock, flags); 3468 return NULL; 3469 } 3470 3471 if (allow_spin) 3472 spin_lock_irqsave(&n->list_lock, flags); 3473 3474 if (slab->inuse == slab->objects) 3475 add_full(s, n, slab); 3476 else 3477 add_partial(n, slab, DEACTIVATE_TO_HEAD); 3478 3479 inc_slabs_node(s, nid, slab->objects); 3480 spin_unlock_irqrestore(&n->list_lock, flags); 3481 3482 return object; 3483 } 3484 3485 #ifdef CONFIG_SLUB_CPU_PARTIAL 3486 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 3487 #else 3488 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 3489 int drain) { } 3490 #endif 3491 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 3492 3493 /* 3494 * Try to allocate a partial slab from a specific node. 3495 */ 3496 static struct slab *get_partial_node(struct kmem_cache *s, 3497 struct kmem_cache_node *n, 3498 struct partial_context *pc) 3499 { 3500 struct slab *slab, *slab2, *partial = NULL; 3501 unsigned long flags; 3502 unsigned int partial_slabs = 0; 3503 3504 /* 3505 * Racy check. If we mistakenly see no partial slabs then we 3506 * just allocate an empty slab. If we mistakenly try to get a 3507 * partial slab and there is none available then get_partial() 3508 * will return NULL. 3509 */ 3510 if (!n || !n->nr_partial) 3511 return NULL; 3512 3513 if (gfpflags_allow_spinning(pc->flags)) 3514 spin_lock_irqsave(&n->list_lock, flags); 3515 else if (!spin_trylock_irqsave(&n->list_lock, flags)) 3516 return NULL; 3517 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 3518 if (!pfmemalloc_match(slab, pc->flags)) 3519 continue; 3520 3521 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 3522 void *object = alloc_single_from_partial(s, n, slab, 3523 pc->orig_size); 3524 if (object) { 3525 partial = slab; 3526 pc->object = object; 3527 break; 3528 } 3529 continue; 3530 } 3531 3532 remove_partial(n, slab); 3533 3534 if (!partial) { 3535 partial = slab; 3536 stat(s, ALLOC_FROM_PARTIAL); 3537 3538 if ((slub_get_cpu_partial(s) == 0)) { 3539 break; 3540 } 3541 } else { 3542 put_cpu_partial(s, slab, 0); 3543 stat(s, CPU_PARTIAL_NODE); 3544 3545 if (++partial_slabs > slub_get_cpu_partial(s) / 2) { 3546 break; 3547 } 3548 } 3549 } 3550 spin_unlock_irqrestore(&n->list_lock, flags); 3551 return partial; 3552 } 3553 3554 /* 3555 * Get a slab from somewhere. Search in increasing NUMA distances. 3556 */ 3557 static struct slab *get_any_partial(struct kmem_cache *s, 3558 struct partial_context *pc) 3559 { 3560 #ifdef CONFIG_NUMA 3561 struct zonelist *zonelist; 3562 struct zoneref *z; 3563 struct zone *zone; 3564 enum zone_type highest_zoneidx = gfp_zone(pc->flags); 3565 struct slab *slab; 3566 unsigned int cpuset_mems_cookie; 3567 3568 /* 3569 * The defrag ratio allows a configuration of the tradeoffs between 3570 * inter node defragmentation and node local allocations. A lower 3571 * defrag_ratio increases the tendency to do local allocations 3572 * instead of attempting to obtain partial slabs from other nodes. 3573 * 3574 * If the defrag_ratio is set to 0 then kmalloc() always 3575 * returns node local objects. If the ratio is higher then kmalloc() 3576 * may return off node objects because partial slabs are obtained 3577 * from other nodes and filled up. 
3578 * 3579 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 3580 * (which makes defrag_ratio = 1000) then every (well almost) 3581 * allocation will first attempt to defrag slab caches on other nodes. 3582 * This means scanning over all nodes to look for partial slabs which 3583 * may be expensive if we do it every time we are trying to find a slab 3584 * with available objects. 3585 */ 3586 if (!s->remote_node_defrag_ratio || 3587 get_cycles() % 1024 > s->remote_node_defrag_ratio) 3588 return NULL; 3589 3590 do { 3591 cpuset_mems_cookie = read_mems_allowed_begin(); 3592 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); 3593 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 3594 struct kmem_cache_node *n; 3595 3596 n = get_node(s, zone_to_nid(zone)); 3597 3598 if (n && cpuset_zone_allowed(zone, pc->flags) && 3599 n->nr_partial > s->min_partial) { 3600 slab = get_partial_node(s, n, pc); 3601 if (slab) { 3602 /* 3603 * Don't check read_mems_allowed_retry() 3604 * here - if mems_allowed was updated in 3605 * parallel, that was a harmless race 3606 * between allocation and the cpuset 3607 * update 3608 */ 3609 return slab; 3610 } 3611 } 3612 } 3613 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 3614 #endif /* CONFIG_NUMA */ 3615 return NULL; 3616 } 3617 3618 /* 3619 * Get a partial slab, lock it and return it. 3620 */ 3621 static struct slab *get_partial(struct kmem_cache *s, int node, 3622 struct partial_context *pc) 3623 { 3624 struct slab *slab; 3625 int searchnode = node; 3626 3627 if (node == NUMA_NO_NODE) 3628 searchnode = numa_mem_id(); 3629 3630 slab = get_partial_node(s, get_node(s, searchnode), pc); 3631 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE))) 3632 return slab; 3633 3634 return get_any_partial(s, pc); 3635 } 3636 3637 #ifdef CONFIG_PREEMPTION 3638 /* 3639 * Calculate the next globally unique transaction for disambiguation 3640 * during cmpxchg. The transactions start with the cpu number and are then 3641 * incremented by CONFIG_NR_CPUS. 3642 */ 3643 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 3644 #else 3645 /* 3646 * No preemption supported therefore also no need to check for 3647 * different cpus. 3648 */ 3649 #define TID_STEP 1 3650 #endif /* CONFIG_PREEMPTION */ 3651 3652 static inline unsigned long next_tid(unsigned long tid) 3653 { 3654 return tid + TID_STEP; 3655 } 3656 3657 #ifdef SLUB_DEBUG_CMPXCHG 3658 static inline unsigned int tid_to_cpu(unsigned long tid) 3659 { 3660 return tid % TID_STEP; 3661 } 3662 3663 static inline unsigned long tid_to_event(unsigned long tid) 3664 { 3665 return tid / TID_STEP; 3666 } 3667 #endif 3668 3669 static inline unsigned int init_tid(int cpu) 3670 { 3671 return cpu; 3672 } 3673 3674 static inline void note_cmpxchg_failure(const char *n, 3675 const struct kmem_cache *s, unsigned long tid) 3676 { 3677 #ifdef SLUB_DEBUG_CMPXCHG 3678 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 3679 3680 pr_info("%s %s: cmpxchg redo ", n, s->name); 3681 3682 if (IS_ENABLED(CONFIG_PREEMPTION) && 3683 tid_to_cpu(tid) != tid_to_cpu(actual_tid)) { 3684 pr_warn("due to cpu change %d -> %d\n", 3685 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 3686 } else if (tid_to_event(tid) != tid_to_event(actual_tid)) { 3687 pr_warn("due to cpu running other code. 
Event %ld->%ld\n", 3688 tid_to_event(tid), tid_to_event(actual_tid)); 3689 } else { 3690 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 3691 actual_tid, tid, next_tid(tid)); 3692 } 3693 #endif 3694 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 3695 } 3696 3697 static void init_kmem_cache_cpus(struct kmem_cache *s) 3698 { 3699 #ifdef CONFIG_PREEMPT_RT 3700 /* 3701 * Register lockdep key for non-boot kmem caches to avoid 3702 * WARN_ON_ONCE(static_obj(key)) in lockdep_register_key() 3703 */ 3704 bool finegrain_lockdep = !init_section_contains(s, 1); 3705 #else 3706 /* 3707 * Don't bother with different lockdep classes for each 3708 * kmem_cache, since we only use local_trylock_irqsave(). 3709 */ 3710 bool finegrain_lockdep = false; 3711 #endif 3712 int cpu; 3713 struct kmem_cache_cpu *c; 3714 3715 if (finegrain_lockdep) 3716 lockdep_register_key(&s->lock_key); 3717 for_each_possible_cpu(cpu) { 3718 c = per_cpu_ptr(s->cpu_slab, cpu); 3719 local_trylock_init(&c->lock); 3720 if (finegrain_lockdep) 3721 lockdep_set_class(&c->lock, &s->lock_key); 3722 c->tid = init_tid(cpu); 3723 } 3724 } 3725 3726 /* 3727 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist, 3728 * unfreezes the slab and puts it on the proper list. 3729 * Assumes the slab has already been safely taken away from kmem_cache_cpu 3730 * by the caller. 3731 */ 3732 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, 3733 void *freelist) 3734 { 3735 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 3736 int free_delta = 0; 3737 void *nextfree, *freelist_iter, *freelist_tail; 3738 int tail = DEACTIVATE_TO_HEAD; 3739 unsigned long flags = 0; 3740 struct freelist_counters old, new; 3741 3742 if (READ_ONCE(slab->freelist)) { 3743 stat(s, DEACTIVATE_REMOTE_FREES); 3744 tail = DEACTIVATE_TO_TAIL; 3745 } 3746 3747 /* 3748 * Stage one: Count the objects on cpu's freelist as free_delta and 3749 * remember the last object in freelist_tail for later splicing. 3750 */ 3751 freelist_tail = NULL; 3752 freelist_iter = freelist; 3753 while (freelist_iter) { 3754 nextfree = get_freepointer(s, freelist_iter); 3755 3756 /* 3757 * If 'nextfree' is invalid, it is possible that the object at 3758 * 'freelist_iter' is already corrupted. So isolate all objects 3759 * starting at 'freelist_iter' by skipping them. 3760 */ 3761 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) 3762 break; 3763 3764 freelist_tail = freelist_iter; 3765 free_delta++; 3766 3767 freelist_iter = nextfree; 3768 } 3769 3770 /* 3771 * Stage two: Unfreeze the slab while splicing the per-cpu 3772 * freelist to the head of slab's freelist. 3773 */ 3774 do { 3775 old.freelist = READ_ONCE(slab->freelist); 3776 old.counters = READ_ONCE(slab->counters); 3777 VM_BUG_ON(!old.frozen); 3778 3779 /* Determine target state of the slab */ 3780 new.counters = old.counters; 3781 new.frozen = 0; 3782 if (freelist_tail) { 3783 new.inuse -= free_delta; 3784 set_freepointer(s, freelist_tail, old.freelist); 3785 new.freelist = freelist; 3786 } else { 3787 new.freelist = old.freelist; 3788 } 3789 } while (!slab_update_freelist(s, slab, &old, &new, "unfreezing slab")); 3790 3791 /* 3792 * Stage three: Manipulate the slab list based on the updated state.
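 * An empty slab is discarded if the node already has at least
 * min_partial slabs; a slab with a freelist goes (back) on the node
 * partial list; a fully used slab is left off all lists.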
3793 */ 3794 if (!new.inuse && n->nr_partial >= s->min_partial) { 3795 stat(s, DEACTIVATE_EMPTY); 3796 discard_slab(s, slab); 3797 stat(s, FREE_SLAB); 3798 } else if (new.freelist) { 3799 spin_lock_irqsave(&n->list_lock, flags); 3800 add_partial(n, slab, tail); 3801 spin_unlock_irqrestore(&n->list_lock, flags); 3802 stat(s, tail); 3803 } else { 3804 stat(s, DEACTIVATE_FULL); 3805 } 3806 } 3807 3808 /* 3809 * ___slab_alloc()'s caller is supposed to check if kmem_cache::kmem_cache_cpu::lock 3810 * can be acquired without a deadlock before invoking the function. 3811 * 3812 * Without LOCKDEP we trust the code to be correct. kmalloc_nolock() is 3813 * using local_lock_is_locked() properly before calling local_lock_cpu_slab(), 3814 * and kmalloc() is not used in an unsupported context. 3815 * 3816 * With LOCKDEP, on PREEMPT_RT lockdep does its checking in local_lock_irqsave(). 3817 * On !PREEMPT_RT we use trylock to avoid false positives in NMI, but 3818 * lockdep_assert() will catch a bug in case: 3819 * #1 3820 * kmalloc() -> ___slab_alloc() -> irqsave -> NMI -> bpf -> kmalloc_nolock() 3821 * or 3822 * #2 3823 * kmalloc() -> ___slab_alloc() -> irqsave -> tracepoint/kprobe -> bpf -> kmalloc_nolock() 3824 * 3825 * On PREEMPT_RT an invocation is not possible from IRQ-off or preempt 3826 * disabled context. The lock will always be acquired and if needed it 3827 * will block and sleep until the lock is available. 3828 * #1 is possible in !PREEMPT_RT only. 3829 * #2 is possible in both, with the twist that irqsave is replaced with rt_spinlock: 3830 * kmalloc() -> ___slab_alloc() -> rt_spin_lock(kmem_cache_A) -> 3831 * tracepoint/kprobe -> bpf -> kmalloc_nolock() -> rt_spin_lock(kmem_cache_B) 3832 * 3833 * local_lock_is_locked() prevents the case kmem_cache_A == kmem_cache_B 3834 */ 3835 #if defined(CONFIG_PREEMPT_RT) || !defined(CONFIG_LOCKDEP) 3836 #define local_lock_cpu_slab(s, flags) \ 3837 local_lock_irqsave(&(s)->cpu_slab->lock, flags) 3838 #else 3839 #define local_lock_cpu_slab(s, flags) \ 3840 do { \ 3841 bool __l = local_trylock_irqsave(&(s)->cpu_slab->lock, flags); \ 3842 lockdep_assert(__l); \ 3843 } while (0) 3844 #endif 3845 3846 #define local_unlock_cpu_slab(s, flags) \ 3847 local_unlock_irqrestore(&(s)->cpu_slab->lock, flags) 3848 3849 #ifdef CONFIG_SLUB_CPU_PARTIAL 3850 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) 3851 { 3852 struct kmem_cache_node *n = NULL, *n2 = NULL; 3853 struct slab *slab, *slab_to_discard = NULL; 3854 unsigned long flags = 0; 3855 3856 while (partial_slab) { 3857 slab = partial_slab; 3858 partial_slab = slab->next; 3859 3860 n2 = get_node(s, slab_nid(slab)); 3861 if (n != n2) { 3862 if (n) 3863 spin_unlock_irqrestore(&n->list_lock, flags); 3864 3865 n = n2; 3866 spin_lock_irqsave(&n->list_lock, flags); 3867 } 3868 3869 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { 3870 slab->next = slab_to_discard; 3871 slab_to_discard = slab; 3872 } else { 3873 add_partial(n, slab, DEACTIVATE_TO_TAIL); 3874 stat(s, FREE_ADD_PARTIAL); 3875 } 3876 } 3877 3878 if (n) 3879 spin_unlock_irqrestore(&n->list_lock, flags); 3880 3881 while (slab_to_discard) { 3882 slab = slab_to_discard; 3883 slab_to_discard = slab_to_discard->next; 3884 3885 stat(s, DEACTIVATE_EMPTY); 3886 discard_slab(s, slab); 3887 stat(s, FREE_SLAB); 3888 } 3889 } 3890 3891 /* 3892 * Put all the cpu partial slabs to the node partial list.
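 * The percpu partial list is detached under the local lock and then
 * handed to __put_partials(), which takes the node list_lock as needed.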
3893 */ 3894 static void put_partials(struct kmem_cache *s) 3895 { 3896 struct slab *partial_slab; 3897 unsigned long flags; 3898 3899 local_lock_irqsave(&s->cpu_slab->lock, flags); 3900 partial_slab = this_cpu_read(s->cpu_slab->partial); 3901 this_cpu_write(s->cpu_slab->partial, NULL); 3902 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3903 3904 if (partial_slab) 3905 __put_partials(s, partial_slab); 3906 } 3907 3908 static void put_partials_cpu(struct kmem_cache *s, 3909 struct kmem_cache_cpu *c) 3910 { 3911 struct slab *partial_slab; 3912 3913 partial_slab = slub_percpu_partial(c); 3914 c->partial = NULL; 3915 3916 if (partial_slab) 3917 __put_partials(s, partial_slab); 3918 } 3919 3920 /* 3921 * Put a slab into a partial slab slot if available. 3922 * 3923 * If we did not find a slot then simply move all the partials to the 3924 * per node partial list. 3925 */ 3926 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 3927 { 3928 struct slab *oldslab; 3929 struct slab *slab_to_put = NULL; 3930 unsigned long flags; 3931 int slabs = 0; 3932 3933 local_lock_cpu_slab(s, flags); 3934 3935 oldslab = this_cpu_read(s->cpu_slab->partial); 3936 3937 if (oldslab) { 3938 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 3939 /* 3940 * Partial array is full. Move the existing set to the 3941 * per node partial list. Postpone the actual unfreezing 3942 * outside of the critical section. 3943 */ 3944 slab_to_put = oldslab; 3945 oldslab = NULL; 3946 } else { 3947 slabs = oldslab->slabs; 3948 } 3949 } 3950 3951 slabs++; 3952 3953 slab->slabs = slabs; 3954 slab->next = oldslab; 3955 3956 this_cpu_write(s->cpu_slab->partial, slab); 3957 3958 local_unlock_cpu_slab(s, flags); 3959 3960 if (slab_to_put) { 3961 __put_partials(s, slab_to_put); 3962 stat(s, CPU_PARTIAL_DRAIN); 3963 } 3964 } 3965 3966 #else /* CONFIG_SLUB_CPU_PARTIAL */ 3967 3968 static inline void put_partials(struct kmem_cache *s) { } 3969 static inline void put_partials_cpu(struct kmem_cache *s, 3970 struct kmem_cache_cpu *c) { } 3971 3972 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 3973 3974 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 3975 { 3976 unsigned long flags; 3977 struct slab *slab; 3978 void *freelist; 3979 3980 local_lock_irqsave(&s->cpu_slab->lock, flags); 3981 3982 slab = c->slab; 3983 freelist = c->freelist; 3984 3985 c->slab = NULL; 3986 c->freelist = NULL; 3987 c->tid = next_tid(c->tid); 3988 3989 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3990 3991 if (slab) { 3992 deactivate_slab(s, slab, freelist); 3993 stat(s, CPUSLAB_FLUSH); 3994 } 3995 } 3996 3997 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 3998 { 3999 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 4000 void *freelist = c->freelist; 4001 struct slab *slab = c->slab; 4002 4003 c->slab = NULL; 4004 c->freelist = NULL; 4005 c->tid = next_tid(c->tid); 4006 4007 if (slab) { 4008 deactivate_slab(s, slab, freelist); 4009 stat(s, CPUSLAB_FLUSH); 4010 } 4011 4012 put_partials_cpu(s, c); 4013 } 4014 4015 static inline void flush_this_cpu_slab(struct kmem_cache *s) 4016 { 4017 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); 4018 4019 if (c->slab) 4020 flush_slab(s, c); 4021 4022 put_partials(s); 4023 } 4024 4025 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 4026 { 4027 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 4028 4029 return c->slab || slub_percpu_partial(c); 4030 } 4031 4032 static bool has_pcs_used(int cpu, struct kmem_cache *s) 4033 { 
4034 	struct slub_percpu_sheaves *pcs;
4035
4036 	if (!s->cpu_sheaves)
4037 		return false;
4038
4039 	pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
4040
4041 	return (pcs->spare || pcs->rcu_free || pcs->main->size);
4042 }
4043
4044 /*
4045  * Flush cpu slab.
4046  *
4047  * Called from CPU work handler with migration disabled.
4048  */
4049 static void flush_cpu_slab(struct work_struct *w)
4050 {
4051 	struct kmem_cache *s;
4052 	struct slub_flush_work *sfw;
4053
4054 	sfw = container_of(w, struct slub_flush_work, work);
4055
4056 	s = sfw->s;
4057
4058 	if (s->cpu_sheaves)
4059 		pcs_flush_all(s);
4060
4061 	flush_this_cpu_slab(s);
4062 }
4063
4064 static void flush_all_cpus_locked(struct kmem_cache *s)
4065 {
4066 	struct slub_flush_work *sfw;
4067 	unsigned int cpu;
4068
4069 	lockdep_assert_cpus_held();
4070 	mutex_lock(&flush_lock);
4071
4072 	for_each_online_cpu(cpu) {
4073 		sfw = &per_cpu(slub_flush, cpu);
4074 		if (!has_cpu_slab(cpu, s) && !has_pcs_used(cpu, s)) {
4075 			sfw->skip = true;
4076 			continue;
4077 		}
4078 		INIT_WORK(&sfw->work, flush_cpu_slab);
4079 		sfw->skip = false;
4080 		sfw->s = s;
4081 		queue_work_on(cpu, flushwq, &sfw->work);
4082 	}
4083
4084 	for_each_online_cpu(cpu) {
4085 		sfw = &per_cpu(slub_flush, cpu);
4086 		if (sfw->skip)
4087 			continue;
4088 		flush_work(&sfw->work);
4089 	}
4090
4091 	mutex_unlock(&flush_lock);
4092 }
4093
4094 static void flush_all(struct kmem_cache *s)
4095 {
4096 	cpus_read_lock();
4097 	flush_all_cpus_locked(s);
4098 	cpus_read_unlock();
4099 }
4100
4101 static void flush_rcu_sheaf(struct work_struct *w)
4102 {
4103 	struct slub_percpu_sheaves *pcs;
4104 	struct slab_sheaf *rcu_free;
4105 	struct slub_flush_work *sfw;
4106 	struct kmem_cache *s;
4107
4108 	sfw = container_of(w, struct slub_flush_work, work);
4109 	s = sfw->s;
4110
4111 	local_lock(&s->cpu_sheaves->lock);
4112 	pcs = this_cpu_ptr(s->cpu_sheaves);
4113
4114 	rcu_free = pcs->rcu_free;
4115 	pcs->rcu_free = NULL;
4116
4117 	local_unlock(&s->cpu_sheaves->lock);
4118
4119 	if (rcu_free)
4120 		call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
4121 }
4122
4123
4124 /* needed for kvfree_rcu_barrier() */
4125 void flush_all_rcu_sheaves(void)
4126 {
4127 	struct slub_flush_work *sfw;
4128 	struct kmem_cache *s;
4129 	unsigned int cpu;
4130
4131 	cpus_read_lock();
4132 	mutex_lock(&slab_mutex);
4133
4134 	list_for_each_entry(s, &slab_caches, list) {
4135 		if (!s->cpu_sheaves)
4136 			continue;
4137
4138 		mutex_lock(&flush_lock);
4139
4140 		for_each_online_cpu(cpu) {
4141 			sfw = &per_cpu(slub_flush, cpu);
4142
4143 			/*
4144 			 * We don't check whether the rcu_free sheaf exists - a
4145 			 * racing __kfree_rcu_sheaf() might have just removed it.
4146 			 * By executing flush_rcu_sheaf() on the cpu we make sure
4147 			 * such a racing __kfree_rcu_sheaf() finished its call_rcu().
4148 			 */
4149
4150 			INIT_WORK(&sfw->work, flush_rcu_sheaf);
4151 			sfw->s = s;
4152 			queue_work_on(cpu, flushwq, &sfw->work);
4153 		}
4154
4155 		for_each_online_cpu(cpu) {
4156 			sfw = &per_cpu(slub_flush, cpu);
4157 			flush_work(&sfw->work);
4158 		}
4159
4160 		mutex_unlock(&flush_lock);
4161 	}
4162
4163 	mutex_unlock(&slab_mutex);
4164 	cpus_read_unlock();
4165
4166 	rcu_barrier();
4167 }
4168
4169 /*
4170  * Use the cpu notifier to ensure that the cpu slabs are flushed when
4171  * necessary.
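 *
 * For example, when a cpu goes offline, any cpu slab, percpu partial
 * slabs and percpu sheaves it still holds would otherwise be stranded;
 * slub_cpu_dead() below flushes them back to the node lists for every
 * cache, under slab_mutex.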
4172 */ 4173 static int slub_cpu_dead(unsigned int cpu) 4174 { 4175 struct kmem_cache *s; 4176 4177 mutex_lock(&slab_mutex); 4178 list_for_each_entry(s, &slab_caches, list) { 4179 __flush_cpu_slab(s, cpu); 4180 if (s->cpu_sheaves) 4181 __pcs_flush_all_cpu(s, cpu); 4182 } 4183 mutex_unlock(&slab_mutex); 4184 return 0; 4185 } 4186 4187 /* 4188 * Check if the objects in a per cpu structure fit numa 4189 * locality expectations. 4190 */ 4191 static inline int node_match(struct slab *slab, int node) 4192 { 4193 #ifdef CONFIG_NUMA 4194 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 4195 return 0; 4196 #endif 4197 return 1; 4198 } 4199 4200 #ifdef CONFIG_SLUB_DEBUG 4201 static int count_free(struct slab *slab) 4202 { 4203 return slab->objects - slab->inuse; 4204 } 4205 4206 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 4207 { 4208 return atomic_long_read(&n->total_objects); 4209 } 4210 4211 /* Supports checking bulk free of a constructed freelist */ 4212 static inline bool free_debug_processing(struct kmem_cache *s, 4213 struct slab *slab, void *head, void *tail, int *bulk_cnt, 4214 unsigned long addr, depot_stack_handle_t handle) 4215 { 4216 bool checks_ok = false; 4217 void *object = head; 4218 int cnt = 0; 4219 4220 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 4221 if (!check_slab(s, slab)) 4222 goto out; 4223 } 4224 4225 if (slab->inuse < *bulk_cnt) { 4226 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", 4227 slab->inuse, *bulk_cnt); 4228 goto out; 4229 } 4230 4231 next_object: 4232 4233 if (++cnt > *bulk_cnt) 4234 goto out_cnt; 4235 4236 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 4237 if (!free_consistency_checks(s, slab, object, addr)) 4238 goto out; 4239 } 4240 4241 if (s->flags & SLAB_STORE_USER) 4242 set_track_update(s, object, TRACK_FREE, addr, handle); 4243 trace(s, slab, object, 0); 4244 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 4245 init_object(s, object, SLUB_RED_INACTIVE); 4246 4247 /* Reached end of constructed freelist yet? 
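	   (The caller passes a freelist already linked through the objects'
	   freepointers, with head as the first and tail as the last object,
	   so walking via get_freepointer() from head visits each object
	   being freed exactly once.)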
*/ 4248 if (object != tail) { 4249 object = get_freepointer(s, object); 4250 goto next_object; 4251 } 4252 checks_ok = true; 4253 4254 out_cnt: 4255 if (cnt != *bulk_cnt) { 4256 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", 4257 *bulk_cnt, cnt); 4258 *bulk_cnt = cnt; 4259 } 4260 4261 out: 4262 4263 if (!checks_ok) 4264 slab_fix(s, "Object at 0x%p not freed", object); 4265 4266 return checks_ok; 4267 } 4268 #endif /* CONFIG_SLUB_DEBUG */ 4269 4270 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) 4271 static unsigned long count_partial(struct kmem_cache_node *n, 4272 int (*get_count)(struct slab *)) 4273 { 4274 unsigned long flags; 4275 unsigned long x = 0; 4276 struct slab *slab; 4277 4278 spin_lock_irqsave(&n->list_lock, flags); 4279 list_for_each_entry(slab, &n->partial, slab_list) 4280 x += get_count(slab); 4281 spin_unlock_irqrestore(&n->list_lock, flags); 4282 return x; 4283 } 4284 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ 4285 4286 #ifdef CONFIG_SLUB_DEBUG 4287 #define MAX_PARTIAL_TO_SCAN 10000 4288 4289 static unsigned long count_partial_free_approx(struct kmem_cache_node *n) 4290 { 4291 unsigned long flags; 4292 unsigned long x = 0; 4293 struct slab *slab; 4294 4295 spin_lock_irqsave(&n->list_lock, flags); 4296 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) { 4297 list_for_each_entry(slab, &n->partial, slab_list) 4298 x += slab->objects - slab->inuse; 4299 } else { 4300 /* 4301 * For a long list, approximate the total count of objects in 4302 * it to meet the limit on the number of slabs to scan. 4303 * Scan from both the list's head and tail for better accuracy. 4304 */ 4305 unsigned long scanned = 0; 4306 4307 list_for_each_entry(slab, &n->partial, slab_list) { 4308 x += slab->objects - slab->inuse; 4309 if (++scanned == MAX_PARTIAL_TO_SCAN / 2) 4310 break; 4311 } 4312 list_for_each_entry_reverse(slab, &n->partial, slab_list) { 4313 x += slab->objects - slab->inuse; 4314 if (++scanned == MAX_PARTIAL_TO_SCAN) 4315 break; 4316 } 4317 x = mult_frac(x, n->nr_partial, scanned); 4318 x = min(x, node_nr_objs(n)); 4319 } 4320 spin_unlock_irqrestore(&n->list_lock, flags); 4321 return x; 4322 } 4323 4324 static noinline void 4325 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 4326 { 4327 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 4328 DEFAULT_RATELIMIT_BURST); 4329 int cpu = raw_smp_processor_id(); 4330 int node; 4331 struct kmem_cache_node *n; 4332 4333 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 4334 return; 4335 4336 pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n", 4337 cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags); 4338 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 4339 s->name, s->object_size, s->size, oo_order(s->oo), 4340 oo_order(s->min)); 4341 4342 if (oo_order(s->min) > get_order(s->object_size)) 4343 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n", 4344 s->name); 4345 4346 for_each_kmem_cache_node(s, node, n) { 4347 unsigned long nr_slabs; 4348 unsigned long nr_objs; 4349 unsigned long nr_free; 4350 4351 nr_free = count_partial_free_approx(n); 4352 nr_slabs = node_nr_slabs(n); 4353 nr_objs = node_nr_objs(n); 4354 4355 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 4356 node, nr_slabs, nr_objs, nr_free); 4357 } 4358 } 4359 #else /* CONFIG_SLUB_DEBUG */ 4360 static inline void 4361 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } 
4362 #endif 4363 4364 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) 4365 { 4366 if (unlikely(slab_test_pfmemalloc(slab))) 4367 return gfp_pfmemalloc_allowed(gfpflags); 4368 4369 return true; 4370 } 4371 4372 static inline bool 4373 __update_cpu_freelist_fast(struct kmem_cache *s, 4374 void *freelist_old, void *freelist_new, 4375 unsigned long tid) 4376 { 4377 struct freelist_tid old = { .freelist = freelist_old, .tid = tid }; 4378 struct freelist_tid new = { .freelist = freelist_new, .tid = next_tid(tid) }; 4379 4380 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid, 4381 &old.freelist_tid, new.freelist_tid); 4382 } 4383 4384 /* 4385 * Check the slab->freelist and either transfer the freelist to the 4386 * per cpu freelist or deactivate the slab. 4387 * 4388 * The slab is still frozen if the return value is not NULL. 4389 * 4390 * If this function returns NULL then the slab has been unfrozen. 4391 */ 4392 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 4393 { 4394 struct freelist_counters old, new; 4395 4396 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 4397 4398 do { 4399 old.freelist = slab->freelist; 4400 old.counters = slab->counters; 4401 4402 new.freelist = NULL; 4403 new.counters = old.counters; 4404 4405 new.inuse = old.objects; 4406 new.frozen = old.freelist != NULL; 4407 4408 4409 } while (!__slab_update_freelist(s, slab, &old, &new, "get_freelist")); 4410 4411 return old.freelist; 4412 } 4413 4414 /* 4415 * Freeze the partial slab and return the pointer to the freelist. 4416 */ 4417 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) 4418 { 4419 struct freelist_counters old, new; 4420 4421 do { 4422 old.freelist = slab->freelist; 4423 old.counters = slab->counters; 4424 4425 new.freelist = NULL; 4426 new.counters = old.counters; 4427 VM_BUG_ON(new.frozen); 4428 4429 new.inuse = old.objects; 4430 new.frozen = 1; 4431 4432 } while (!slab_update_freelist(s, slab, &old, &new, "freeze_slab")); 4433 4434 return old.freelist; 4435 } 4436 4437 /* 4438 * Slow path. The lockless freelist is empty or we need to perform 4439 * debugging duties. 4440 * 4441 * Processing is still very fast if new objects have been freed to the 4442 * regular freelist. In that case we simply take over the regular freelist 4443 * as the lockless freelist and zap the regular freelist. 4444 * 4445 * If that is not working then we fall back to the partial lists. We take the 4446 * first element of the freelist as the object to allocate now and move the 4447 * rest of the freelist to the lockless freelist. 4448 * 4449 * And if we were unable to get a new slab from the partial slab lists then 4450 * we need to allocate a new slab. This is the slowest path since it involves 4451 * a call to the page allocator and the setup of a new slab. 4452 * 4453 * Version of __slab_alloc to use when we know that preemption is 4454 * already disabled (which is the case for bulk allocation). 
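 *
 * To summarize, the order of attempts is: take over the percpu slab's
 * regular freelist, then a slab from the percpu partial list, then a
 * slab from the node partial lists via get_partial(), and finally a
 * new slab from new_slab().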
4455 */ 4456 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 4457 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 4458 { 4459 bool allow_spin = gfpflags_allow_spinning(gfpflags); 4460 void *freelist; 4461 struct slab *slab; 4462 unsigned long flags; 4463 struct partial_context pc; 4464 bool try_thisnode = true; 4465 4466 stat(s, ALLOC_SLOWPATH); 4467 4468 reread_slab: 4469 4470 slab = READ_ONCE(c->slab); 4471 if (!slab) { 4472 /* 4473 * if the node is not online or has no normal memory, just 4474 * ignore the node constraint 4475 */ 4476 if (unlikely(node != NUMA_NO_NODE && 4477 !node_isset(node, slab_nodes))) 4478 node = NUMA_NO_NODE; 4479 goto new_slab; 4480 } 4481 4482 if (unlikely(!node_match(slab, node))) { 4483 /* 4484 * same as above but node_match() being false already 4485 * implies node != NUMA_NO_NODE. 4486 * 4487 * We don't strictly honor pfmemalloc and NUMA preferences 4488 * when !allow_spin because: 4489 * 4490 * 1. Most kmalloc() users allocate objects on the local node, 4491 * so kmalloc_nolock() tries not to interfere with them by 4492 * deactivating the cpu slab. 4493 * 4494 * 2. Deactivating due to NUMA or pfmemalloc mismatch may cause 4495 * unnecessary slab allocations even when n->partial list 4496 * is not empty. 4497 */ 4498 if (!node_isset(node, slab_nodes) || 4499 !allow_spin) { 4500 node = NUMA_NO_NODE; 4501 } else { 4502 stat(s, ALLOC_NODE_MISMATCH); 4503 goto deactivate_slab; 4504 } 4505 } 4506 4507 /* 4508 * By rights, we should be searching for a slab page that was 4509 * PFMEMALLOC but right now, we are losing the pfmemalloc 4510 * information when the page leaves the per-cpu allocator 4511 */ 4512 if (unlikely(!pfmemalloc_match(slab, gfpflags) && allow_spin)) 4513 goto deactivate_slab; 4514 4515 /* must check again c->slab in case we got preempted and it changed */ 4516 local_lock_cpu_slab(s, flags); 4517 4518 if (unlikely(slab != c->slab)) { 4519 local_unlock_cpu_slab(s, flags); 4520 goto reread_slab; 4521 } 4522 freelist = c->freelist; 4523 if (freelist) 4524 goto load_freelist; 4525 4526 freelist = get_freelist(s, slab); 4527 4528 if (!freelist) { 4529 c->slab = NULL; 4530 c->tid = next_tid(c->tid); 4531 local_unlock_cpu_slab(s, flags); 4532 stat(s, DEACTIVATE_BYPASS); 4533 goto new_slab; 4534 } 4535 4536 stat(s, ALLOC_REFILL); 4537 4538 load_freelist: 4539 4540 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 4541 4542 /* 4543 * freelist is pointing to the list of objects to be used. 4544 * slab is pointing to the slab from which the objects are obtained. 4545 * That slab must be frozen for per cpu allocations to work. 
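	 *
	 * load_freelist is always entered with the local lock held, both
	 * from the refill path above and from retry_load_slab below, as
	 * the lockdep_assert_held() documents.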
4546 	 */
4547 	VM_BUG_ON(!c->slab->frozen);
4548 	c->freelist = get_freepointer(s, freelist);
4549 	c->tid = next_tid(c->tid);
4550 	local_unlock_cpu_slab(s, flags);
4551 	return freelist;
4552
4553 deactivate_slab:
4554
4555 	local_lock_cpu_slab(s, flags);
4556 	if (slab != c->slab) {
4557 		local_unlock_cpu_slab(s, flags);
4558 		goto reread_slab;
4559 	}
4560 	freelist = c->freelist;
4561 	c->slab = NULL;
4562 	c->freelist = NULL;
4563 	c->tid = next_tid(c->tid);
4564 	local_unlock_cpu_slab(s, flags);
4565 	deactivate_slab(s, slab, freelist);
4566
4567 new_slab:
4568
4569 #ifdef CONFIG_SLUB_CPU_PARTIAL
4570 	while (slub_percpu_partial(c)) {
4571 		local_lock_cpu_slab(s, flags);
4572 		if (unlikely(c->slab)) {
4573 			local_unlock_cpu_slab(s, flags);
4574 			goto reread_slab;
4575 		}
4576 		if (unlikely(!slub_percpu_partial(c))) {
4577 			local_unlock_cpu_slab(s, flags);
4578 			/* we were preempted and the partial list got empty */
4579 			goto new_objects;
4580 		}
4581
4582 		slab = slub_percpu_partial(c);
4583 		slub_set_percpu_partial(c, slab);
4584
4585 		if (likely(node_match(slab, node) &&
4586 			   pfmemalloc_match(slab, gfpflags)) ||
4587 			   !allow_spin) {
4588 			c->slab = slab;
4589 			freelist = get_freelist(s, slab);
4590 			VM_BUG_ON(!freelist);
4591 			stat(s, CPU_PARTIAL_ALLOC);
4592 			goto load_freelist;
4593 		}
4594
4595 		local_unlock_cpu_slab(s, flags);
4596
4597 		slab->next = NULL;
4598 		__put_partials(s, slab);
4599 	}
4600 #endif
4601
4602 new_objects:
4603
4604 	pc.flags = gfpflags;
4605 	/*
4606 	 * When a preferred node is indicated but no __GFP_THISNODE is set:
4607 	 *
4608 	 * 1) try to get a partial slab from the target node only, by having
4609 	 *    __GFP_THISNODE in pc.flags for get_partial()
4610 	 * 2) if 1) failed, try to allocate a new slab from the target node with
4611 	 *    GFP_NOWAIT | __GFP_THISNODE opportunistically
4612 	 * 3) if 2) failed, retry with the original gfpflags, which will allow
4613 	 *    get_partial() to try the partial lists of other nodes before
4614 	 *    potentially allocating a new page from other nodes
4615 	 */
4616 	if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
4617 		     && try_thisnode)) {
4618 		if (unlikely(!allow_spin))
4619 			/* Do not upgrade gfp to NOWAIT from a more restrictive mode */
4620 			pc.flags = gfpflags | __GFP_THISNODE;
4621 		else
4622 			pc.flags = GFP_NOWAIT | __GFP_THISNODE;
4623 	}
4624
4625 	pc.orig_size = orig_size;
4626 	slab = get_partial(s, node, &pc);
4627 	if (slab) {
4628 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4629 			freelist = pc.object;
4630 			/*
4631 			 * For debug caches here we had to go through
4632 			 * alloc_single_from_partial() so just store the
4633 			 * tracking info and return the object.
4634 			 *
4635 			 * Due to disabled preemption we need to disallow
4636 			 * blocking. The flags are further adjusted by
4637 			 * gfp_nested_mask() in stack_depot itself.
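			 *
			 * That is also why __GFP_DIRECT_RECLAIM is masked
			 * out when the flags are passed to set_track()
			 * below.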
4638 */ 4639 if (s->flags & SLAB_STORE_USER) 4640 set_track(s, freelist, TRACK_ALLOC, addr, 4641 gfpflags & ~(__GFP_DIRECT_RECLAIM)); 4642 4643 return freelist; 4644 } 4645 4646 freelist = freeze_slab(s, slab); 4647 goto retry_load_slab; 4648 } 4649 4650 slub_put_cpu_ptr(s->cpu_slab); 4651 slab = new_slab(s, pc.flags, node); 4652 c = slub_get_cpu_ptr(s->cpu_slab); 4653 4654 if (unlikely(!slab)) { 4655 if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 4656 && try_thisnode) { 4657 try_thisnode = false; 4658 goto new_objects; 4659 } 4660 slab_out_of_memory(s, gfpflags, node); 4661 return NULL; 4662 } 4663 4664 stat(s, ALLOC_SLAB); 4665 4666 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 4667 freelist = alloc_single_from_new_slab(s, slab, orig_size, gfpflags); 4668 4669 if (unlikely(!freelist)) { 4670 /* This could cause an endless loop. Fail instead. */ 4671 if (!allow_spin) 4672 return NULL; 4673 goto new_objects; 4674 } 4675 4676 if (s->flags & SLAB_STORE_USER) 4677 set_track(s, freelist, TRACK_ALLOC, addr, 4678 gfpflags & ~(__GFP_DIRECT_RECLAIM)); 4679 4680 return freelist; 4681 } 4682 4683 /* 4684 * No other reference to the slab yet so we can 4685 * muck around with it freely without cmpxchg 4686 */ 4687 freelist = slab->freelist; 4688 slab->freelist = NULL; 4689 slab->inuse = slab->objects; 4690 slab->frozen = 1; 4691 4692 inc_slabs_node(s, slab_nid(slab), slab->objects); 4693 4694 if (unlikely(!pfmemalloc_match(slab, gfpflags) && allow_spin)) { 4695 /* 4696 * For !pfmemalloc_match() case we don't load freelist so that 4697 * we don't make further mismatched allocations easier. 4698 */ 4699 deactivate_slab(s, slab, get_freepointer(s, freelist)); 4700 return freelist; 4701 } 4702 4703 retry_load_slab: 4704 4705 local_lock_cpu_slab(s, flags); 4706 if (unlikely(c->slab)) { 4707 void *flush_freelist = c->freelist; 4708 struct slab *flush_slab = c->slab; 4709 4710 c->slab = NULL; 4711 c->freelist = NULL; 4712 c->tid = next_tid(c->tid); 4713 4714 local_unlock_cpu_slab(s, flags); 4715 4716 if (unlikely(!allow_spin)) { 4717 /* Reentrant slub cannot take locks, defer */ 4718 defer_deactivate_slab(flush_slab, flush_freelist); 4719 } else { 4720 deactivate_slab(s, flush_slab, flush_freelist); 4721 } 4722 4723 stat(s, CPUSLAB_FLUSH); 4724 4725 goto retry_load_slab; 4726 } 4727 c->slab = slab; 4728 4729 goto load_freelist; 4730 } 4731 /* 4732 * We disallow kprobes in ___slab_alloc() to prevent reentrance 4733 * 4734 * kmalloc() -> ___slab_alloc() -> local_lock_cpu_slab() protected part of 4735 * ___slab_alloc() manipulating c->freelist -> kprobe -> bpf -> 4736 * kmalloc_nolock() or kfree_nolock() -> __update_cpu_freelist_fast() 4737 * manipulating c->freelist without lock. 4738 * 4739 * This does not prevent kprobe in functions called from ___slab_alloc() such as 4740 * local_lock_irqsave() itself, and that is fine, we only need to protect the 4741 * c->freelist manipulation in ___slab_alloc() itself. 4742 */ 4743 NOKPROBE_SYMBOL(___slab_alloc); 4744 4745 /* 4746 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 4747 * disabled. Compensates for possible cpu changes by refetching the per cpu area 4748 * pointer. 4749 */ 4750 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 4751 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 4752 { 4753 void *p; 4754 4755 #ifdef CONFIG_PREEMPT_COUNT 4756 /* 4757 * We may have been preempted and rescheduled on a different 4758 * cpu before disabling preemption. 
Need to reload the cpu area
4759 	 * pointer.
4760 	 */
4761 	c = slub_get_cpu_ptr(s->cpu_slab);
4762 #endif
4763 	if (unlikely(!gfpflags_allow_spinning(gfpflags))) {
4764 		if (local_lock_is_locked(&s->cpu_slab->lock)) {
4765 			/*
4766 			 * EBUSY is an internal signal to kmalloc_nolock() to
4767 			 * retry a different bucket. It's not propagated
4768 			 * to the caller.
4769 			 */
4770 			p = ERR_PTR(-EBUSY);
4771 			goto out;
4772 		}
4773 	}
4774 	p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
4775 out:
4776 #ifdef CONFIG_PREEMPT_COUNT
4777 	slub_put_cpu_ptr(s->cpu_slab);
4778 #endif
4779 	return p;
4780 }
4781
4782 static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
4783 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4784 {
4785 	struct kmem_cache_cpu *c;
4786 	struct slab *slab;
4787 	unsigned long tid;
4788 	void *object;
4789
4790 redo:
4791 	/*
4792 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
4793 	 * enabled. We may switch back and forth between cpus while
4794 	 * reading from one cpu area. That does not matter as long
4795 	 * as we end up on the original cpu again when doing the cmpxchg.
4796 	 *
4797 	 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
4798 	 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
4799 	 * the tid. If we are preempted and switched to another cpu between the
4800 	 * two reads, it's OK as the two are still associated with the same cpu
4801 	 * and cmpxchg later will validate the cpu.
4802 	 */
4803 	c = raw_cpu_ptr(s->cpu_slab);
4804 	tid = READ_ONCE(c->tid);
4805
4806 	/*
4807 	 * The irqless object alloc/free algorithm used here depends on the
4808 	 * sequence of fetching cpu_slab's data. tid should be fetched before
4809 	 * anything on c to guarantee that the object and slab associated with
4810 	 * the previous tid won't be used with the current tid. If we fetch tid
4811 	 * first, the object and slab could be ones associated with the next tid
4812 	 * and our alloc/free request will fail. In this case, we will retry. So, no problem.
4813 	 */
4814 	barrier();
4815
4816 	/*
4817 	 * The transaction ids are globally unique per cpu and per operation on
4818 	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
4819 	 * occurs on the right processor and that there was no operation on the
4820 	 * linked list in between.
4821 	 */
4822
4823 	object = c->freelist;
4824 	slab = c->slab;
4825
4826 #ifdef CONFIG_NUMA
4827 	if (static_branch_unlikely(&strict_numa) &&
4828 	    node == NUMA_NO_NODE) {
4829
4830 		struct mempolicy *mpol = current->mempolicy;
4831
4832 		if (mpol) {
4833 			/*
4834 			 * Special BIND rule support. If the existing slab
4835 			 * is in the permitted set then do not redirect
4836 			 * to a particular node.
4837 			 * Otherwise we apply the memory policy to get
4838 			 * the node we need to allocate on.
4839 			 */
4840 			if (mpol->mode != MPOL_BIND || !slab ||
4841 			    !node_isset(slab_nid(slab), mpol->nodes))
4842
4843 				node = mempolicy_slab_node();
4844 		}
4845 	}
4846 #endif
4847
4848 	if (!USE_LOCKLESS_FAST_PATH() ||
4849 	    unlikely(!object || !slab || !node_match(slab, node))) {
4850 		object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
4851 	} else {
4852 		void *next_object = get_freepointer_safe(s, object);
4853
4854 		/*
4855 		 * The cmpxchg will only match if there was no additional
4856 		 * operation and if we are on the right processor.
4857 		 *
4858 		 * The cmpxchg does the following atomically (without lock
4859 		 * semantics!)
4860 		 * 1. Relocate first pointer to the current per cpu area.
4861 		 * 2. Verify that tid and freelist have not been changed
4862 		 * 3.
If they were not changed replace tid and freelist 4863 * 4864 * Since this is without lock semantics the protection is only 4865 * against code executing on this cpu *not* from access by 4866 * other cpus. 4867 */ 4868 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { 4869 note_cmpxchg_failure("slab_alloc", s, tid); 4870 goto redo; 4871 } 4872 prefetch_freepointer(s, next_object); 4873 stat(s, ALLOC_FASTPATH); 4874 } 4875 4876 return object; 4877 } 4878 4879 /* 4880 * If the object has been wiped upon free, make sure it's fully initialized by 4881 * zeroing out freelist pointer. 4882 * 4883 * Note that we also wipe custom freelist pointers. 4884 */ 4885 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 4886 void *obj) 4887 { 4888 if (unlikely(slab_want_init_on_free(s)) && obj && 4889 !freeptr_outside_object(s)) 4890 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 4891 0, sizeof(void *)); 4892 } 4893 4894 static __fastpath_inline 4895 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 4896 { 4897 flags &= gfp_allowed_mask; 4898 4899 might_alloc(flags); 4900 4901 if (unlikely(should_failslab(s, flags))) 4902 return NULL; 4903 4904 return s; 4905 } 4906 4907 static __fastpath_inline 4908 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 4909 gfp_t flags, size_t size, void **p, bool init, 4910 unsigned int orig_size) 4911 { 4912 unsigned int zero_size = s->object_size; 4913 bool kasan_init = init; 4914 size_t i; 4915 gfp_t init_flags = flags & gfp_allowed_mask; 4916 4917 /* 4918 * For kmalloc object, the allocated memory size(object_size) is likely 4919 * larger than the requested size(orig_size). If redzone check is 4920 * enabled for the extra space, don't zero it, as it will be redzoned 4921 * soon. The redzone operation for this extra space could be seen as a 4922 * replacement of current poisoning under certain debug option, and 4923 * won't break other sanity checks. 4924 */ 4925 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) && 4926 (s->flags & SLAB_KMALLOC)) 4927 zero_size = orig_size; 4928 4929 /* 4930 * When slab_debug is enabled, avoid memory initialization integrated 4931 * into KASAN and instead zero out the memory via the memset below with 4932 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and 4933 * cause false-positive reports. This does not lead to a performance 4934 * penalty on production builds, as slab_debug is not intended to be 4935 * enabled there. 4936 */ 4937 if (__slub_debug_enabled()) 4938 kasan_init = false; 4939 4940 /* 4941 * As memory initialization might be integrated into KASAN, 4942 * kasan_slab_alloc and initialization memset must be 4943 * kept together to avoid discrepancies in behavior. 4944 * 4945 * As p[i] might get tagged, memset and kmemleak hook come after KASAN. 4946 */ 4947 for (i = 0; i < size; i++) { 4948 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init); 4949 if (p[i] && init && (!kasan_init || 4950 !kasan_has_integrated_init())) 4951 memset(p[i], 0, zero_size); 4952 if (gfpflags_allow_spinning(flags)) 4953 kmemleak_alloc_recursive(p[i], s->object_size, 1, 4954 s->flags, init_flags); 4955 kmsan_slab_alloc(s, p[i], init_flags); 4956 alloc_tagging_slab_alloc_hook(s, p[i], flags); 4957 } 4958 4959 return memcg_slab_post_alloc_hook(s, lru, flags, size, p); 4960 } 4961 4962 /* 4963 * Replace the empty main sheaf with a (at least partially) full sheaf. 
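 *
 * The replacement is attempted in order of increasing cost: swap with a
 * non-empty spare sheaf, take a full sheaf from the barn, refill an
 * empty sheaf from slab pages, or allocate a whole new full sheaf.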
4964 * 4965 * Must be called with the cpu_sheaves local lock locked. If successful, returns 4966 * the pcs pointer and the local lock locked (possibly on a different cpu than 4967 * initially called). If not successful, returns NULL and the local lock 4968 * unlocked. 4969 */ 4970 static struct slub_percpu_sheaves * 4971 __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs, gfp_t gfp) 4972 { 4973 struct slab_sheaf *empty = NULL; 4974 struct slab_sheaf *full; 4975 struct node_barn *barn; 4976 bool can_alloc; 4977 4978 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock)); 4979 4980 if (pcs->spare && pcs->spare->size > 0) { 4981 swap(pcs->main, pcs->spare); 4982 return pcs; 4983 } 4984 4985 barn = get_barn(s); 4986 if (!barn) { 4987 local_unlock(&s->cpu_sheaves->lock); 4988 return NULL; 4989 } 4990 4991 full = barn_replace_empty_sheaf(barn, pcs->main); 4992 4993 if (full) { 4994 stat(s, BARN_GET); 4995 pcs->main = full; 4996 return pcs; 4997 } 4998 4999 stat(s, BARN_GET_FAIL); 5000 5001 can_alloc = gfpflags_allow_blocking(gfp); 5002 5003 if (can_alloc) { 5004 if (pcs->spare) { 5005 empty = pcs->spare; 5006 pcs->spare = NULL; 5007 } else { 5008 empty = barn_get_empty_sheaf(barn); 5009 } 5010 } 5011 5012 local_unlock(&s->cpu_sheaves->lock); 5013 5014 if (!can_alloc) 5015 return NULL; 5016 5017 if (empty) { 5018 if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) { 5019 full = empty; 5020 } else { 5021 /* 5022 * we must be very low on memory so don't bother 5023 * with the barn 5024 */ 5025 free_empty_sheaf(s, empty); 5026 } 5027 } else { 5028 full = alloc_full_sheaf(s, gfp); 5029 } 5030 5031 if (!full) 5032 return NULL; 5033 5034 /* 5035 * we can reach here only when gfpflags_allow_blocking 5036 * so this must not be an irq 5037 */ 5038 local_lock(&s->cpu_sheaves->lock); 5039 pcs = this_cpu_ptr(s->cpu_sheaves); 5040 5041 /* 5042 * If we are returning empty sheaf, we either got it from the 5043 * barn or had to allocate one. If we are returning a full 5044 * sheaf, it's due to racing or being migrated to a different 5045 * cpu. Breaching the barn's sheaf limits should be thus rare 5046 * enough so just ignore them to simplify the recovery. 5047 */ 5048 5049 if (pcs->main->size == 0) { 5050 barn_put_empty_sheaf(barn, pcs->main); 5051 pcs->main = full; 5052 return pcs; 5053 } 5054 5055 if (!pcs->spare) { 5056 pcs->spare = full; 5057 return pcs; 5058 } 5059 5060 if (pcs->spare->size == 0) { 5061 barn_put_empty_sheaf(barn, pcs->spare); 5062 pcs->spare = full; 5063 return pcs; 5064 } 5065 5066 barn_put_full_sheaf(barn, full); 5067 stat(s, BARN_PUT); 5068 5069 return pcs; 5070 } 5071 5072 static __fastpath_inline 5073 void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node) 5074 { 5075 struct slub_percpu_sheaves *pcs; 5076 bool node_requested; 5077 void *object; 5078 5079 #ifdef CONFIG_NUMA 5080 if (static_branch_unlikely(&strict_numa) && 5081 node == NUMA_NO_NODE) { 5082 5083 struct mempolicy *mpol = current->mempolicy; 5084 5085 if (mpol) { 5086 /* 5087 * Special BIND rule support. If the local node 5088 * is in permitted set then do not redirect 5089 * to a particular node. 5090 * Otherwise we apply the memory policy to get 5091 * the node we need to allocate on. 
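			 *
			 * E.g. a task bound to nodes 0-1 running on a cpu
			 * local to node 2 must not take objects from the
			 * local sheaves; node is then redirected via
			 * mempolicy_slab_node() and the sheaf fast path
			 * is bypassed below.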
5092 */ 5093 if (mpol->mode != MPOL_BIND || 5094 !node_isset(numa_mem_id(), mpol->nodes)) 5095 5096 node = mempolicy_slab_node(); 5097 } 5098 } 5099 #endif 5100 5101 node_requested = IS_ENABLED(CONFIG_NUMA) && node != NUMA_NO_NODE; 5102 5103 /* 5104 * We assume the percpu sheaves contain only local objects although it's 5105 * not completely guaranteed, so we verify later. 5106 */ 5107 if (unlikely(node_requested && node != numa_mem_id())) 5108 return NULL; 5109 5110 if (!local_trylock(&s->cpu_sheaves->lock)) 5111 return NULL; 5112 5113 pcs = this_cpu_ptr(s->cpu_sheaves); 5114 5115 if (unlikely(pcs->main->size == 0)) { 5116 pcs = __pcs_replace_empty_main(s, pcs, gfp); 5117 if (unlikely(!pcs)) 5118 return NULL; 5119 } 5120 5121 object = pcs->main->objects[pcs->main->size - 1]; 5122 5123 if (unlikely(node_requested)) { 5124 /* 5125 * Verify that the object was from the node we want. This could 5126 * be false because of cpu migration during an unlocked part of 5127 * the current allocation or previous freeing process. 5128 */ 5129 if (page_to_nid(virt_to_page(object)) != node) { 5130 local_unlock(&s->cpu_sheaves->lock); 5131 return NULL; 5132 } 5133 } 5134 5135 pcs->main->size--; 5136 5137 local_unlock(&s->cpu_sheaves->lock); 5138 5139 stat(s, ALLOC_PCS); 5140 5141 return object; 5142 } 5143 5144 static __fastpath_inline 5145 unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, size_t size, void **p) 5146 { 5147 struct slub_percpu_sheaves *pcs; 5148 struct slab_sheaf *main; 5149 unsigned int allocated = 0; 5150 unsigned int batch; 5151 5152 next_batch: 5153 if (!local_trylock(&s->cpu_sheaves->lock)) 5154 return allocated; 5155 5156 pcs = this_cpu_ptr(s->cpu_sheaves); 5157 5158 if (unlikely(pcs->main->size == 0)) { 5159 5160 struct slab_sheaf *full; 5161 struct node_barn *barn; 5162 5163 if (pcs->spare && pcs->spare->size > 0) { 5164 swap(pcs->main, pcs->spare); 5165 goto do_alloc; 5166 } 5167 5168 barn = get_barn(s); 5169 if (!barn) { 5170 local_unlock(&s->cpu_sheaves->lock); 5171 return allocated; 5172 } 5173 5174 full = barn_replace_empty_sheaf(barn, pcs->main); 5175 5176 if (full) { 5177 stat(s, BARN_GET); 5178 pcs->main = full; 5179 goto do_alloc; 5180 } 5181 5182 stat(s, BARN_GET_FAIL); 5183 5184 local_unlock(&s->cpu_sheaves->lock); 5185 5186 /* 5187 * Once full sheaves in barn are depleted, let the bulk 5188 * allocation continue from slab pages, otherwise we would just 5189 * be copying arrays of pointers twice. 5190 */ 5191 return allocated; 5192 } 5193 5194 do_alloc: 5195 5196 main = pcs->main; 5197 batch = min(size, main->size); 5198 5199 main->size -= batch; 5200 memcpy(p, main->objects + main->size, batch * sizeof(void *)); 5201 5202 local_unlock(&s->cpu_sheaves->lock); 5203 5204 stat_add(s, ALLOC_PCS, batch); 5205 5206 allocated += batch; 5207 5208 if (batch < size) { 5209 p += batch; 5210 size -= batch; 5211 goto next_batch; 5212 } 5213 5214 return allocated; 5215 } 5216 5217 5218 /* 5219 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 5220 * have the fastpath folded into their functions. So no function call 5221 * overhead for requests that can be satisfied on the fastpath. 5222 * 5223 * The fastpath works by first checking if the lockless freelist can be used. 5224 * If not then __slab_alloc is called for slow processing. 5225 * 5226 * Otherwise we can simply pick the next object from the lockless free list. 
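 *
 * When the cache has percpu sheaves, alloc_from_pcs() is tried even
 * before the lockless freelist, which then acts as the fallback when
 * the sheaves are empty or their local lock cannot be trylocked.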
5227 */ 5228 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, 5229 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 5230 { 5231 void *object; 5232 bool init = false; 5233 5234 s = slab_pre_alloc_hook(s, gfpflags); 5235 if (unlikely(!s)) 5236 return NULL; 5237 5238 object = kfence_alloc(s, orig_size, gfpflags); 5239 if (unlikely(object)) 5240 goto out; 5241 5242 if (s->cpu_sheaves) 5243 object = alloc_from_pcs(s, gfpflags, node); 5244 5245 if (!object) 5246 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); 5247 5248 maybe_wipe_obj_freeptr(s, object); 5249 init = slab_want_init_on_alloc(gfpflags, s); 5250 5251 out: 5252 /* 5253 * When init equals 'true', like for kzalloc() family, only 5254 * @orig_size bytes might be zeroed instead of s->object_size 5255 * In case this fails due to memcg_slab_post_alloc_hook(), 5256 * object is set to NULL 5257 */ 5258 slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size); 5259 5260 return object; 5261 } 5262 5263 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags) 5264 { 5265 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, 5266 s->object_size); 5267 5268 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 5269 5270 return ret; 5271 } 5272 EXPORT_SYMBOL(kmem_cache_alloc_noprof); 5273 5274 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, 5275 gfp_t gfpflags) 5276 { 5277 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, 5278 s->object_size); 5279 5280 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 5281 5282 return ret; 5283 } 5284 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof); 5285 5286 bool kmem_cache_charge(void *objp, gfp_t gfpflags) 5287 { 5288 if (!memcg_kmem_online()) 5289 return true; 5290 5291 return memcg_slab_post_charge(objp, gfpflags); 5292 } 5293 EXPORT_SYMBOL(kmem_cache_charge); 5294 5295 /** 5296 * kmem_cache_alloc_node - Allocate an object on the specified node 5297 * @s: The cache to allocate from. 5298 * @gfpflags: See kmalloc(). 5299 * @node: node number of the target node. 5300 * 5301 * Identical to kmem_cache_alloc but it will allocate memory on the given 5302 * node, which can improve the performance for cpu bound structures. 5303 * 5304 * Fallback to other node is possible if __GFP_THISNODE is not set. 
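 * With __GFP_THISNODE the allocation is instead restricted to @node and
 * fails rather than falling back.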
5305  *
5306  * Return: pointer to the new object or %NULL in case of error
5307  */
5308 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
5309 {
5310 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
5311
5312 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
5313
5314 	return ret;
5315 }
5316 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
5317
5318 static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
5319 				      struct slab_sheaf *sheaf, gfp_t gfp)
5320 {
5321 	int ret = 0;
5322
5323 	ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
5324
5325 	if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
5326 		return ret;
5327
5328 	/*
5329 	 * If we are allowed to, refill the sheaf with pfmemalloc, but then
5330 	 * remember it for when it's returned.
5331 	 */
5332 	ret = refill_sheaf(s, sheaf, gfp);
5333 	sheaf->pfmemalloc = true;
5334
5335 	return ret;
5336 }
5337
5338 /*
5339  * Returns a sheaf that has at least the requested size.
5340  * When prefilling is needed, it is done with the given gfp flags.
5341  *
5342  * Returns NULL if sheaf allocation or prefilling failed.
5343  */
5344 struct slab_sheaf *
5345 kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
5346 {
5347 	struct slub_percpu_sheaves *pcs;
5348 	struct slab_sheaf *sheaf = NULL;
5349 	struct node_barn *barn;
5350
5351 	if (unlikely(size > s->sheaf_capacity)) {
5352
5353 		/*
5354 		 * slab_debug disables cpu sheaves intentionally so all
5355 		 * prefilled sheaves become "oversize" and we give up on
5356 		 * performance for the debugging. Same with SLUB_TINY.
5357 		 * Creating a cache without sheaves and then requesting a
5358 		 * prefilled sheaf is however not expected, so warn.
5359 		 */
5360 		WARN_ON_ONCE(s->sheaf_capacity == 0 &&
5361 			     !IS_ENABLED(CONFIG_SLUB_TINY) &&
5362 			     !(s->flags & SLAB_DEBUG_FLAGS));
5363
5364 		sheaf = kzalloc(struct_size(sheaf, objects, size), gfp);
5365 		if (!sheaf)
5366 			return NULL;
5367
5368 		stat(s, SHEAF_PREFILL_OVERSIZE);
5369 		sheaf->cache = s;
5370 		sheaf->capacity = size;
5371
5372 		/*
5373 		 * We do not need to care about pfmemalloc here because oversize
5374 		 * sheaves are always flushed and freed when returned.
5375 		 */
5376 		if (!__kmem_cache_alloc_bulk(s, gfp, size,
5377 					     &sheaf->objects[0])) {
5378 			kfree(sheaf);
5379 			return NULL;
5380 		}
5381
5382 		sheaf->size = size;
5383
5384 		return sheaf;
5385 	}
5386
5387 	local_lock(&s->cpu_sheaves->lock);
5388 	pcs = this_cpu_ptr(s->cpu_sheaves);
5389
5390 	if (pcs->spare) {
5391 		sheaf = pcs->spare;
5392 		pcs->spare = NULL;
5393 		stat(s, SHEAF_PREFILL_FAST);
5394 	} else {
5395 		barn = get_barn(s);
5396
5397 		stat(s, SHEAF_PREFILL_SLOW);
5398 		if (barn)
5399 			sheaf = barn_get_full_or_empty_sheaf(barn);
5400 		if (sheaf && sheaf->size)
5401 			stat(s, BARN_GET);
5402 		else
5403 			stat(s, BARN_GET_FAIL);
5404 	}
5405
5406 	local_unlock(&s->cpu_sheaves->lock);
5407
5408
5409 	if (!sheaf)
5410 		sheaf = alloc_empty_sheaf(s, gfp);
5411
5412 	if (sheaf) {
5413 		sheaf->capacity = s->sheaf_capacity;
5414 		sheaf->pfmemalloc = false;
5415
5416 		if (sheaf->size < size &&
5417 		    __prefill_sheaf_pfmemalloc(s, sheaf, gfp)) {
5418 			sheaf_flush_unused(s, sheaf);
5419 			free_empty_sheaf(s, sheaf);
5420 			sheaf = NULL;
5421 		}
5422 	}
5423
5424 	return sheaf;
5425 }
5426
5427 /*
5428  * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
5429  *
5430  * If the sheaf cannot simply become the percpu spare sheaf, but there's space
5431  * for a full sheaf in the barn, we try to refill the sheaf back to the cache's
5432  * sheaf_capacity to avoid handling partially full
sheaves.
5433  *
5434  * If the refill fails because gfp is e.g. GFP_NOWAIT, or the barn is full, the
5435  * sheaf is instead flushed and freed.
5436  */
5437 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
5438 			     struct slab_sheaf *sheaf)
5439 {
5440 	struct slub_percpu_sheaves *pcs;
5441 	struct node_barn *barn;
5442
5443 	if (unlikely((sheaf->capacity != s->sheaf_capacity)
5444 		     || sheaf->pfmemalloc)) {
5445 		sheaf_flush_unused(s, sheaf);
5446 		kfree(sheaf);
5447 		return;
5448 	}
5449
5450 	local_lock(&s->cpu_sheaves->lock);
5451 	pcs = this_cpu_ptr(s->cpu_sheaves);
5452 	barn = get_barn(s);
5453
5454 	if (!pcs->spare) {
5455 		pcs->spare = sheaf;
5456 		sheaf = NULL;
5457 		stat(s, SHEAF_RETURN_FAST);
5458 	}
5459
5460 	local_unlock(&s->cpu_sheaves->lock);
5461
5462 	if (!sheaf)
5463 		return;
5464
5465 	stat(s, SHEAF_RETURN_SLOW);
5466
5467 	/*
5468 	 * If the barn has too many full sheaves or we fail to refill the sheaf,
5469 	 * simply flush and free it.
5470 	 */
5471 	if (!barn || data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
5472 	    refill_sheaf(s, sheaf, gfp)) {
5473 		sheaf_flush_unused(s, sheaf);
5474 		free_empty_sheaf(s, sheaf);
5475 		return;
5476 	}
5477
5478 	barn_put_full_sheaf(barn, sheaf);
5479 	stat(s, BARN_PUT);
5480 }
5481
5482 /*
5483  * Refill a sheaf previously returned by kmem_cache_prefill_sheaf() to at least
5484  * the given size.
5485  *
5486  * The sheaf might be replaced by a new one when requesting more than
5487  * s->sheaf_capacity objects. If such a replacement is necessary but the refill
5488  * fails (returning -ENOMEM), the existing sheaf is left intact.
5489  *
5490  * In practice we always refill to the full sheaf's capacity.
5491  */
5492 int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
5493 			    struct slab_sheaf **sheafp, unsigned int size)
5494 {
5495 	struct slab_sheaf *sheaf;
5496
5497 	/*
5498 	 * TODO: do we want to support *sheaf == NULL to be equivalent of
5499 	 * kmem_cache_prefill_sheaf() ?
5500 	 */
5501 	if (!sheafp || !(*sheafp))
5502 		return -EINVAL;
5503
5504 	sheaf = *sheafp;
5505 	if (sheaf->size >= size)
5506 		return 0;
5507
5508 	if (likely(sheaf->capacity >= size)) {
5509 		if (likely(sheaf->capacity == s->sheaf_capacity))
5510 			return __prefill_sheaf_pfmemalloc(s, sheaf, gfp);
5511
5512 		if (!__kmem_cache_alloc_bulk(s, gfp, sheaf->capacity - sheaf->size,
5513 					     &sheaf->objects[sheaf->size])) {
5514 			return -ENOMEM;
5515 		}
5516 		sheaf->size = sheaf->capacity;
5517
5518 		return 0;
5519 	}
5520
5521 	/*
5522 	 * We had a regular sized sheaf and need an oversize one, or we had an
5523 	 * oversize one already but need a larger one now.
5524 	 * This should be a very rare path so let's not complicate it.
5525 	 */
5526 	sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
5527 	if (!sheaf)
5528 		return -ENOMEM;
5529
5530 	kmem_cache_return_sheaf(s, gfp, *sheafp);
5531 	*sheafp = sheaf;
5532 	return 0;
5533 }
5534
5535 /*
5536  * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
5537  *
5538  * Guaranteed not to fail for as many allocations as the requested sheaf size.
5539  * After the sheaf is emptied, it fails - no fallback to the slab cache itself.
5540  *
5541  * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT;
5542  * memcg charging is forced over the limit if necessary, to avoid failure.
5543  *
5544  * It is possible that the allocation comes from kfence and then the sheaf
5545  * size is not decreased.
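 *
 * Illustrative usage sketch (assuming the usual alloc_hooks() wrapper
 * names without the _noprof suffix):
 *
 *	sheaf = kmem_cache_prefill_sheaf(s, GFP_KERNEL, 8);
 *	if (!sheaf)
 *		return -ENOMEM;
 *	...
 *	obj = kmem_cache_alloc_from_sheaf(s, GFP_KERNEL, sheaf);
 *	...
 *	kmem_cache_return_sheaf(s, GFP_KERNEL, sheaf);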
5546 */ 5547 void * 5548 kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp, 5549 struct slab_sheaf *sheaf) 5550 { 5551 void *ret = NULL; 5552 bool init; 5553 5554 if (sheaf->size == 0) 5555 goto out; 5556 5557 ret = kfence_alloc(s, s->object_size, gfp); 5558 5559 if (likely(!ret)) 5560 ret = sheaf->objects[--sheaf->size]; 5561 5562 init = slab_want_init_on_alloc(gfp, s); 5563 5564 /* add __GFP_NOFAIL to force successful memcg charging */ 5565 slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size); 5566 out: 5567 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE); 5568 5569 return ret; 5570 } 5571 5572 unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf) 5573 { 5574 return sheaf->size; 5575 } 5576 /* 5577 * To avoid unnecessary overhead, we pass through large allocation requests 5578 * directly to the page allocator. We use __GFP_COMP, because we will need to 5579 * know the allocation order to free the pages properly in kfree. 5580 */ 5581 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node) 5582 { 5583 struct page *page; 5584 void *ptr = NULL; 5585 unsigned int order = get_order(size); 5586 5587 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 5588 flags = kmalloc_fix_flags(flags); 5589 5590 flags |= __GFP_COMP; 5591 5592 if (node == NUMA_NO_NODE) 5593 page = alloc_frozen_pages_noprof(flags, order); 5594 else 5595 page = __alloc_frozen_pages_noprof(flags, order, node, NULL); 5596 5597 if (page) { 5598 ptr = page_address(page); 5599 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, 5600 PAGE_SIZE << order); 5601 __SetPageLargeKmalloc(page); 5602 } 5603 5604 ptr = kasan_kmalloc_large(ptr, size, flags); 5605 /* As ptr might get tagged, call kmemleak hook after KASAN. */ 5606 kmemleak_alloc(ptr, size, 1, flags); 5607 kmsan_kmalloc_large(ptr, size, flags); 5608 5609 return ptr; 5610 } 5611 5612 void *__kmalloc_large_noprof(size_t size, gfp_t flags) 5613 { 5614 void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE); 5615 5616 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 5617 flags, NUMA_NO_NODE); 5618 return ret; 5619 } 5620 EXPORT_SYMBOL(__kmalloc_large_noprof); 5621 5622 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) 5623 { 5624 void *ret = ___kmalloc_large_node(size, flags, node); 5625 5626 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 5627 flags, node); 5628 return ret; 5629 } 5630 EXPORT_SYMBOL(__kmalloc_large_node_noprof); 5631 5632 static __always_inline 5633 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node, 5634 unsigned long caller) 5635 { 5636 struct kmem_cache *s; 5637 void *ret; 5638 5639 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 5640 ret = __kmalloc_large_node_noprof(size, flags, node); 5641 trace_kmalloc(caller, ret, size, 5642 PAGE_SIZE << get_order(size), flags, node); 5643 return ret; 5644 } 5645 5646 if (unlikely(!size)) 5647 return ZERO_SIZE_PTR; 5648 5649 s = kmalloc_slab(size, b, flags, caller); 5650 5651 ret = slab_alloc_node(s, NULL, flags, node, caller, size); 5652 ret = kasan_kmalloc(s, ret, size, flags); 5653 trace_kmalloc(caller, ret, size, s->size, flags, node); 5654 return ret; 5655 } 5656 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) 5657 { 5658 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_); 5659 } 5660 EXPORT_SYMBOL(__kmalloc_node_noprof); 5661 5662 void *__kmalloc_noprof(size_t size, gfp_t flags) 5663 { 5664 return 
__do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_); 5665 } 5666 EXPORT_SYMBOL(__kmalloc_noprof); 5667 5668 /** 5669 * kmalloc_nolock - Allocate an object of given size from any context. 5670 * @size: size to allocate 5671 * @gfp_flags: GFP flags. Only __GFP_ACCOUNT, __GFP_ZERO, __GFP_NO_OBJ_EXT 5672 * allowed. 5673 * @node: node number of the target node. 5674 * 5675 * Return: pointer to the new object or NULL in case of error. 5676 * NULL does not mean EBUSY or EAGAIN. It means ENOMEM. 5677 * There is no reason to call it again and expect !NULL. 5678 */ 5679 void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node) 5680 { 5681 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_NOMEMALLOC | gfp_flags; 5682 struct kmem_cache *s; 5683 bool can_retry = true; 5684 void *ret = ERR_PTR(-EBUSY); 5685 5686 VM_WARN_ON_ONCE(gfp_flags & ~(__GFP_ACCOUNT | __GFP_ZERO | 5687 __GFP_NO_OBJ_EXT)); 5688 5689 if (unlikely(!size)) 5690 return ZERO_SIZE_PTR; 5691 5692 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq())) 5693 /* kmalloc_nolock() in PREEMPT_RT is not supported from irq */ 5694 return NULL; 5695 retry: 5696 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 5697 return NULL; 5698 s = kmalloc_slab(size, NULL, alloc_gfp, _RET_IP_); 5699 5700 if (!(s->flags & __CMPXCHG_DOUBLE) && !kmem_cache_debug(s)) 5701 /* 5702 * kmalloc_nolock() is not supported on architectures that 5703 * don't implement cmpxchg16b, but debug caches don't use 5704 * per-cpu slab and per-cpu partial slabs. They rely on 5705 * kmem_cache_node->list_lock, so kmalloc_nolock() can 5706 * attempt to allocate from debug caches by 5707 * spin_trylock_irqsave(&n->list_lock, ...) 5708 */ 5709 return NULL; 5710 5711 /* 5712 * Do not call slab_alloc_node(), since trylock mode isn't 5713 * compatible with slab_pre_alloc_hook/should_failslab and 5714 * kfence_alloc. Hence call __slab_alloc_node() (at most twice) 5715 * and slab_post_alloc_hook() directly. 5716 * 5717 * In !PREEMPT_RT ___slab_alloc() manipulates (freelist,tid) pair 5718 * in irq saved region. It assumes that the same cpu will not 5719 * __update_cpu_freelist_fast() into the same (freelist,tid) pair. 5720 * Therefore use in_nmi() to check whether particular bucket is in 5721 * irq protected section. 5722 * 5723 * If in_nmi() && local_lock_is_locked(s->cpu_slab) then it means that 5724 * this cpu was interrupted somewhere inside ___slab_alloc() after 5725 * it did local_lock_irqsave(&s->cpu_slab->lock, flags). 5726 * In this case fast path with __update_cpu_freelist_fast() is not safe. 5727 */ 5728 if (!in_nmi() || !local_lock_is_locked(&s->cpu_slab->lock)) 5729 ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size); 5730 5731 if (PTR_ERR(ret) == -EBUSY) { 5732 if (can_retry) { 5733 /* pick the next kmalloc bucket */ 5734 size = s->object_size + 1; 5735 /* 5736 * Another alternative is to 5737 * if (memcg) alloc_gfp &= ~__GFP_ACCOUNT; 5738 * else if (!memcg) alloc_gfp |= __GFP_ACCOUNT; 5739 * to retry from bucket of the same size. 
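		 *
		 * Moving to the next bucket is preferred since it uses a
		 * different kmem_cache and thus a different local lock,
		 * which the interrupted context cannot be holding.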
5740 		 */
5741 			can_retry = false;
5742 			goto retry;
5743 		}
5744 		ret = NULL;
5745 	}
5746
5747 	maybe_wipe_obj_freeptr(s, ret);
5748 	slab_post_alloc_hook(s, NULL, alloc_gfp, 1, &ret,
5749 			     slab_want_init_on_alloc(alloc_gfp, s), size);
5750
5751 	ret = kasan_kmalloc(s, ret, size, alloc_gfp);
5752 	return ret;
5753 }
5754 EXPORT_SYMBOL_GPL(kmalloc_nolock_noprof);
5755
5756 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
5757 					 int node, unsigned long caller)
5758 {
5759 	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
5760
5761 }
5762 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
5763
5764 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
5765 {
5766 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
5767 				    _RET_IP_, size);
5768
5769 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
5770
5771 	ret = kasan_kmalloc(s, ret, size, gfpflags);
5772 	return ret;
5773 }
5774 EXPORT_SYMBOL(__kmalloc_cache_noprof);
5775
5776 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
5777 				  int node, size_t size)
5778 {
5779 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
5780
5781 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
5782
5783 	ret = kasan_kmalloc(s, ret, size, gfpflags);
5784 	return ret;
5785 }
5786 EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
5787
5788 static noinline void free_to_partial_list(
5789 	struct kmem_cache *s, struct slab *slab,
5790 	void *head, void *tail, int bulk_cnt,
5791 	unsigned long addr)
5792 {
5793 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
5794 	struct slab *slab_free = NULL;
5795 	int cnt = bulk_cnt;
5796 	unsigned long flags;
5797 	depot_stack_handle_t handle = 0;
5798
5799 	/*
5800 	 * We cannot use GFP_NOWAIT as there are callsites where waking up
5801 	 * kswapd could deadlock.
5802 	 */
5803 	if (s->flags & SLAB_STORE_USER)
5804 		handle = set_track_prepare(__GFP_NOWARN);
5805
5806 	spin_lock_irqsave(&n->list_lock, flags);
5807
5808 	if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
5809 		void *prior = slab->freelist;
5810
5811 		/* Perform the actual freeing while we still hold the locks */
5812 		slab->inuse -= cnt;
5813 		set_freepointer(s, tail, prior);
5814 		slab->freelist = head;
5815
5816 		/*
5817 		 * If the slab is empty, and the node's partial list is full,
5818 		 * it should be discarded anyway no matter whether it's on the
5819 		 * full or the partial list.
5820 		 */
5821 		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
5822 			slab_free = slab;
5823
5824 		if (!prior) {
5825 			/* was on full list */
5826 			remove_full(s, n, slab);
5827 			if (!slab_free) {
5828 				add_partial(n, slab, DEACTIVATE_TO_TAIL);
5829 				stat(s, FREE_ADD_PARTIAL);
5830 			}
5831 		} else if (slab_free) {
5832 			remove_partial(n, slab);
5833 			stat(s, FREE_REMOVE_PARTIAL);
5834 		}
5835 	}
5836
5837 	if (slab_free) {
5838 		/*
5839 		 * Update the counters while still holding n->list_lock to
5840 		 * prevent spurious validation warnings.
5841 		 */
5842 		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
5843 	}
5844
5845 	spin_unlock_irqrestore(&n->list_lock, flags);
5846
5847 	if (slab_free) {
5848 		stat(s, FREE_SLAB);
5849 		free_slab(s, slab_free);
5850 	}
5851 }
5852
5853 /*
5854  * Slow path handling. This may still be called frequently since objects
5855  * have a longer lifetime than the cpu slabs in most processing loads.
5856  *
5857  * So we still attempt to reduce cache line usage. Just take the slab
5858  * lock and free the item.
If there is no additional partial slab
5859  * handling required then we can return immediately.
5860  */
5861 static void __slab_free(struct kmem_cache *s, struct slab *slab,
5862 			void *head, void *tail, int cnt,
5863 			unsigned long addr)
5864
5865 {
5866 	bool was_frozen, was_full;
5867 	struct freelist_counters old, new;
5868 	struct kmem_cache_node *n = NULL;
5869 	unsigned long flags;
5870 	bool on_node_partial;
5871
5872 	stat(s, FREE_SLOWPATH);
5873
5874 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
5875 		free_to_partial_list(s, slab, head, tail, cnt, addr);
5876 		return;
5877 	}
5878
5879 	/*
5880 	 * It is enough to test IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) below
5881 	 * instead of kmem_cache_has_cpu_partial(s), because kmem_cache_debug(s)
5882 	 * is the only other reason it can be false, and it is already handled
5883 	 * above.
5884 	 */
5885
5886 	do {
5887 		if (unlikely(n)) {
5888 			spin_unlock_irqrestore(&n->list_lock, flags);
5889 			n = NULL;
5890 		}
5891
5892 		old.freelist = slab->freelist;
5893 		old.counters = slab->counters;
5894
5895 		was_full = (old.freelist == NULL);
5896 		was_frozen = old.frozen;
5897
5898 		set_freepointer(s, tail, old.freelist);
5899
5900 		new.freelist = head;
5901 		new.counters = old.counters;
5902 		new.inuse -= cnt;
5903
5904 		/*
5905 		 * Might need to be taken off (due to becoming empty) or added
5906 		 * to (due to not being full anymore) the partial list.
5907 		 * Unless it's frozen.
5908 		 */
5909 		if ((!new.inuse || was_full) && !was_frozen) {
5910 			/*
5911 			 * If the slab becomes non-full and we have cpu partial
5912 			 * lists, we put it there unconditionally to avoid
5913 			 * taking the list_lock. Otherwise we need it.
5914 			 */
5915 			if (!(IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) && was_full)) {
5916
5917 				n = get_node(s, slab_nid(slab));
5918 				/*
5919 				 * Speculatively acquire the list_lock.
5920 				 * If the cmpxchg does not succeed then we may
5921 				 * drop the list_lock without any processing.
5922 				 *
5923 				 * Otherwise the list_lock will synchronize with
5924 				 * other processors updating the list of slabs.
5925 				 */
5926 				spin_lock_irqsave(&n->list_lock, flags);
5927
5928 				on_node_partial = slab_test_node_partial(slab);
5929 			}
5930 		}
5931
5932 	} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
5933
5934 	if (likely(!n)) {
5935
5936 		if (likely(was_frozen)) {
5937 			/*
5938 			 * The list lock was not taken therefore no list
5939 			 * activity can be necessary.
5940 			 */
5941 			stat(s, FREE_FROZEN);
5942 		} else if (IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) && was_full) {
5943 			/*
5944 			 * If we started with a full slab then put it onto the
5945 			 * per cpu partial list.
5946 			 */
5947 			put_cpu_partial(s, slab, 1);
5948 			stat(s, CPU_PARTIAL_FREE);
5949 		}
5950
5951 		/*
5952 		 * In other cases we didn't take the list_lock because the slab
5953 		 * was already on the partial list and will remain there.
5954 		 */
5955
5956 		return;
5957 	}
5958
5959 	/*
5960 	 * This slab was partially empty but not on the per-node partial list,
5961 	 * in which case we shouldn't manipulate its list, just return.
5962 	 */
5963 	if (!was_full && !on_node_partial) {
5964 		spin_unlock_irqrestore(&n->list_lock, flags);
5965 		return;
5966 	}
5967
5968 	/*
5969 	 * If the slab became empty, should we add/keep it on the partial list,
5970 	 * or do we have enough partial slabs already?
5971 	 */
5972 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
5973 		goto slab_empty;
5974
5975 	/*
5976 	 * Objects are left in the slab. If it was not on the partial list
5977 	 * before then add it. This can only happen when the cache has no per
5978 	 * cpu partial list; otherwise we would have put it there.
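	 * (With CONFIG_SLUB_CPU_PARTIAL, the was_full case was already
	 * handled above by put_cpu_partial() without taking the list_lock
	 * at all.)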
5979 */
5980 if (!IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL) && unlikely(was_full)) {
5981 add_partial(n, slab, DEACTIVATE_TO_TAIL);
5982 stat(s, FREE_ADD_PARTIAL);
5983 }
5984 spin_unlock_irqrestore(&n->list_lock, flags);
5985 return;
5986
5987 slab_empty:
5988 /*
5989 * The slab could have a single object and thus go from full to empty in
5990 * a single free, but more likely it was on the partial list. Remove it.
5991 */
5992 if (likely(!was_full)) {
5993 remove_partial(n, slab);
5994 stat(s, FREE_REMOVE_PARTIAL);
5995 }
5996
5997 spin_unlock_irqrestore(&n->list_lock, flags);
5998 stat(s, FREE_SLAB);
5999 discard_slab(s, slab);
6000 }
6001
6002 /*
6003 * pcs is locked. We should have gotten rid of the spare sheaf and obtained an
6004 * empty sheaf, while the main sheaf is full. We want to install the empty sheaf
6005 * as a main sheaf, and make the current main sheaf a spare sheaf.
6006 *
6007 * However due to having relinquished the cpu_sheaves lock when obtaining
6008 * the empty sheaf, we need to handle some unlikely but possible cases.
6009 *
6010 * If we put any sheaf to the barn here, it's because we were interrupted or
6011 * have been migrated to a different cpu, which should be rare enough so just
6012 * ignore the barn's limits to simplify the handling.
6013 *
6014 * An alternative scenario that gets us here is when we fail
6015 * barn_replace_full_sheaf(), because there's no empty sheaf available in the
6016 * barn, so we had to allocate one with alloc_empty_sheaf(). But because we saw
6017 * the limit on full sheaves was not exceeded, we assume it didn't change and
6018 * just put the full sheaf there.
6019 */
6020 static void __pcs_install_empty_sheaf(struct kmem_cache *s,
6021 struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty,
6022 struct node_barn *barn)
6023 {
6024 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
6025
6026 /* This is what we expect to find if nobody interrupted us. */
6027 if (likely(!pcs->spare)) {
6028 pcs->spare = pcs->main;
6029 pcs->main = empty;
6030 return;
6031 }
6032
6033 /*
6034 * Unlikely because if the main sheaf had space, we would have just
6035 * freed to it. Get rid of our empty sheaf.
6036 */
6037 if (pcs->main->size < s->sheaf_capacity) {
6038 barn_put_empty_sheaf(barn, empty);
6039 return;
6040 }
6041
6042 /* Also unlikely for the same reason */
6043 if (pcs->spare->size < s->sheaf_capacity) {
6044 swap(pcs->main, pcs->spare);
6045 barn_put_empty_sheaf(barn, empty);
6046 return;
6047 }
6048
6049 /*
6050 * We probably failed barn_replace_full_sheaf() due to no empty sheaf
6051 * available there, but we allocated one, so finish the job.
6052 */
6053 barn_put_full_sheaf(barn, pcs->main);
6054 stat(s, BARN_PUT);
6055 pcs->main = empty;
6056 }
6057
6058 /*
6059 * Replace the full main sheaf with an (at least partially) empty sheaf.
6060 *
6061 * Must be called with the cpu_sheaves local lock held. On success, returns
6062 * the pcs pointer with the local lock still held (possibly on a different cpu
6063 * than the one initially called on). On failure, returns NULL with the local
6064 * lock released.
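 *
 * The expected caller pattern, sketched from free_to_pcs() below:
 *
 *	if (!local_trylock(&s->cpu_sheaves->lock))
 *		return false;
 *	pcs = this_cpu_ptr(s->cpu_sheaves);
 *	if (unlikely(pcs->main->size == s->sheaf_capacity))
 *		pcs = __pcs_replace_full_main(s, pcs);
 *	if (pcs)
 *		... push the object to pcs->main, then local_unlock() ...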
6065 */ 6066 static struct slub_percpu_sheaves * 6067 __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs) 6068 { 6069 struct slab_sheaf *empty; 6070 struct node_barn *barn; 6071 bool put_fail; 6072 6073 restart: 6074 lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock)); 6075 6076 barn = get_barn(s); 6077 if (!barn) { 6078 local_unlock(&s->cpu_sheaves->lock); 6079 return NULL; 6080 } 6081 6082 put_fail = false; 6083 6084 if (!pcs->spare) { 6085 empty = barn_get_empty_sheaf(barn); 6086 if (empty) { 6087 pcs->spare = pcs->main; 6088 pcs->main = empty; 6089 return pcs; 6090 } 6091 goto alloc_empty; 6092 } 6093 6094 if (pcs->spare->size < s->sheaf_capacity) { 6095 swap(pcs->main, pcs->spare); 6096 return pcs; 6097 } 6098 6099 empty = barn_replace_full_sheaf(barn, pcs->main); 6100 6101 if (!IS_ERR(empty)) { 6102 stat(s, BARN_PUT); 6103 pcs->main = empty; 6104 return pcs; 6105 } 6106 6107 if (PTR_ERR(empty) == -E2BIG) { 6108 /* Since we got here, spare exists and is full */ 6109 struct slab_sheaf *to_flush = pcs->spare; 6110 6111 stat(s, BARN_PUT_FAIL); 6112 6113 pcs->spare = NULL; 6114 local_unlock(&s->cpu_sheaves->lock); 6115 6116 sheaf_flush_unused(s, to_flush); 6117 empty = to_flush; 6118 goto got_empty; 6119 } 6120 6121 /* 6122 * We could not replace full sheaf because barn had no empty 6123 * sheaves. We can still allocate it and put the full sheaf in 6124 * __pcs_install_empty_sheaf(), but if we fail to allocate it, 6125 * make sure to count the fail. 6126 */ 6127 put_fail = true; 6128 6129 alloc_empty: 6130 local_unlock(&s->cpu_sheaves->lock); 6131 6132 empty = alloc_empty_sheaf(s, GFP_NOWAIT); 6133 if (empty) 6134 goto got_empty; 6135 6136 if (put_fail) 6137 stat(s, BARN_PUT_FAIL); 6138 6139 if (!sheaf_flush_main(s)) 6140 return NULL; 6141 6142 if (!local_trylock(&s->cpu_sheaves->lock)) 6143 return NULL; 6144 6145 pcs = this_cpu_ptr(s->cpu_sheaves); 6146 6147 /* 6148 * we flushed the main sheaf so it should be empty now, 6149 * but in case we got preempted or migrated, we need to 6150 * check again 6151 */ 6152 if (pcs->main->size == s->sheaf_capacity) 6153 goto restart; 6154 6155 return pcs; 6156 6157 got_empty: 6158 if (!local_trylock(&s->cpu_sheaves->lock)) { 6159 barn_put_empty_sheaf(barn, empty); 6160 return NULL; 6161 } 6162 6163 pcs = this_cpu_ptr(s->cpu_sheaves); 6164 __pcs_install_empty_sheaf(s, pcs, empty, barn); 6165 6166 return pcs; 6167 } 6168 6169 /* 6170 * Free an object to the percpu sheaves. 6171 * The object is expected to have passed slab_free_hook() already. 
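 *
 * A sketch of the expected call sequence, taken from slab_free() further
 * below:
 *
 *	if (!slab_free_hook(s, object, slab_want_init_on_free(s), false))
 *		return;
 *	if (free_to_pcs(s, object))
 *		return;
 *	do_slab_free(s, slab, object, object, 1, addr);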
6172 */
6173 static __fastpath_inline
6174 bool free_to_pcs(struct kmem_cache *s, void *object)
6175 {
6176 struct slub_percpu_sheaves *pcs;
6177
6178 if (!local_trylock(&s->cpu_sheaves->lock))
6179 return false;
6180
6181 pcs = this_cpu_ptr(s->cpu_sheaves);
6182
6183 if (unlikely(pcs->main->size == s->sheaf_capacity)) {
6184
6185 pcs = __pcs_replace_full_main(s, pcs);
6186 if (unlikely(!pcs))
6187 return false;
6188 }
6189
6190 pcs->main->objects[pcs->main->size++] = object;
6191
6192 local_unlock(&s->cpu_sheaves->lock);
6193
6194 stat(s, FREE_PCS);
6195
6196 return true;
6197 }
6198
6199 static void rcu_free_sheaf(struct rcu_head *head)
6200 {
6201 struct kmem_cache_node *n;
6202 struct slab_sheaf *sheaf;
6203 struct node_barn *barn = NULL;
6204 struct kmem_cache *s;
6205
6206 sheaf = container_of(head, struct slab_sheaf, rcu_head);
6207
6208 s = sheaf->cache;
6209
6210 /*
6211 * This may remove some objects due to slab_free_hook() returning false,
6212 * so that the sheaf might no longer be completely full. But it's easier
6213 * to handle it as full (unless it became completely empty), as the code
6214 * handles it fine. The only downside is that the sheaf will serve fewer
6215 * allocations when reused. This only happens due to debugging, which is
6216 * a performance hit anyway.
6217 *
6218 * If it returns true, there was at least one object from a pfmemalloc
6219 * slab, so simply flush everything.
6220 */
6221 if (__rcu_free_sheaf_prepare(s, sheaf))
6222 goto flush;
6223
6224 n = get_node(s, sheaf->node);
6225 if (!n)
6226 goto flush;
6227
6228 barn = n->barn;
6229
6230 /* due to slab_free_hook() */
6231 if (unlikely(sheaf->size == 0))
6232 goto empty;
6233
6234 /*
6235 * Checking nr_full/nr_empty outside the lock avoids contention in case
6236 * the barn is at the respective limit. Due to the race we might go over
6237 * the limit, but that should be rare and harmless.
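 *
 * The data_race() annotations below mark these lockless reads as
 * intentional for KCSAN; a stale value only means we occasionally pick
 * the other (still correct) destination for the sheaf.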
6238 */ 6239 6240 if (data_race(barn->nr_full) < MAX_FULL_SHEAVES) { 6241 stat(s, BARN_PUT); 6242 barn_put_full_sheaf(barn, sheaf); 6243 return; 6244 } 6245 6246 flush: 6247 stat(s, BARN_PUT_FAIL); 6248 sheaf_flush_unused(s, sheaf); 6249 6250 empty: 6251 if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) { 6252 barn_put_empty_sheaf(barn, sheaf); 6253 return; 6254 } 6255 6256 free_empty_sheaf(s, sheaf); 6257 } 6258 6259 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj) 6260 { 6261 struct slub_percpu_sheaves *pcs; 6262 struct slab_sheaf *rcu_sheaf; 6263 6264 if (!local_trylock(&s->cpu_sheaves->lock)) 6265 goto fail; 6266 6267 pcs = this_cpu_ptr(s->cpu_sheaves); 6268 6269 if (unlikely(!pcs->rcu_free)) { 6270 6271 struct slab_sheaf *empty; 6272 struct node_barn *barn; 6273 6274 if (pcs->spare && pcs->spare->size == 0) { 6275 pcs->rcu_free = pcs->spare; 6276 pcs->spare = NULL; 6277 goto do_free; 6278 } 6279 6280 barn = get_barn(s); 6281 if (!barn) { 6282 local_unlock(&s->cpu_sheaves->lock); 6283 goto fail; 6284 } 6285 6286 empty = barn_get_empty_sheaf(barn); 6287 6288 if (empty) { 6289 pcs->rcu_free = empty; 6290 goto do_free; 6291 } 6292 6293 local_unlock(&s->cpu_sheaves->lock); 6294 6295 empty = alloc_empty_sheaf(s, GFP_NOWAIT); 6296 6297 if (!empty) 6298 goto fail; 6299 6300 if (!local_trylock(&s->cpu_sheaves->lock)) { 6301 barn_put_empty_sheaf(barn, empty); 6302 goto fail; 6303 } 6304 6305 pcs = this_cpu_ptr(s->cpu_sheaves); 6306 6307 if (unlikely(pcs->rcu_free)) 6308 barn_put_empty_sheaf(barn, empty); 6309 else 6310 pcs->rcu_free = empty; 6311 } 6312 6313 do_free: 6314 6315 rcu_sheaf = pcs->rcu_free; 6316 6317 /* 6318 * Since we flush immediately when size reaches capacity, we never reach 6319 * this with size already at capacity, so no OOB write is possible. 6320 */ 6321 rcu_sheaf->objects[rcu_sheaf->size++] = obj; 6322 6323 if (likely(rcu_sheaf->size < s->sheaf_capacity)) { 6324 rcu_sheaf = NULL; 6325 } else { 6326 pcs->rcu_free = NULL; 6327 rcu_sheaf->node = numa_mem_id(); 6328 } 6329 6330 /* 6331 * we flush before local_unlock to make sure a racing 6332 * flush_all_rcu_sheaves() doesn't miss this sheaf 6333 */ 6334 if (rcu_sheaf) 6335 call_rcu(&rcu_sheaf->rcu_head, rcu_free_sheaf); 6336 6337 local_unlock(&s->cpu_sheaves->lock); 6338 6339 stat(s, FREE_RCU_SHEAF); 6340 return true; 6341 6342 fail: 6343 stat(s, FREE_RCU_SHEAF_FAIL); 6344 return false; 6345 } 6346 6347 /* 6348 * Bulk free objects to the percpu sheaves. 6349 * Unlike free_to_pcs() this includes the calls to all necessary hooks 6350 * and the fallback to freeing to slab pages. 
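 *
 * Called from kmem_cache_free_bulk() when s->cpu_sheaves is set, e.g.
 * (illustrative):
 *
 *	void *objs[16];
 *	... objs filled, e.g. by kmem_cache_alloc_bulk() ...
 *	kmem_cache_free_bulk(s, 16, objs);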
6351 */ 6352 static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p) 6353 { 6354 struct slub_percpu_sheaves *pcs; 6355 struct slab_sheaf *main, *empty; 6356 bool init = slab_want_init_on_free(s); 6357 unsigned int batch, i = 0; 6358 struct node_barn *barn; 6359 void *remote_objects[PCS_BATCH_MAX]; 6360 unsigned int remote_nr = 0; 6361 int node = numa_mem_id(); 6362 6363 next_remote_batch: 6364 while (i < size) { 6365 struct slab *slab = virt_to_slab(p[i]); 6366 6367 memcg_slab_free_hook(s, slab, p + i, 1); 6368 alloc_tagging_slab_free_hook(s, slab, p + i, 1); 6369 6370 if (unlikely(!slab_free_hook(s, p[i], init, false))) { 6371 p[i] = p[--size]; 6372 continue; 6373 } 6374 6375 if (unlikely((IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node) 6376 || slab_test_pfmemalloc(slab))) { 6377 remote_objects[remote_nr] = p[i]; 6378 p[i] = p[--size]; 6379 if (++remote_nr >= PCS_BATCH_MAX) 6380 goto flush_remote; 6381 continue; 6382 } 6383 6384 i++; 6385 } 6386 6387 if (!size) 6388 goto flush_remote; 6389 6390 next_batch: 6391 if (!local_trylock(&s->cpu_sheaves->lock)) 6392 goto fallback; 6393 6394 pcs = this_cpu_ptr(s->cpu_sheaves); 6395 6396 if (likely(pcs->main->size < s->sheaf_capacity)) 6397 goto do_free; 6398 6399 barn = get_barn(s); 6400 if (!barn) 6401 goto no_empty; 6402 6403 if (!pcs->spare) { 6404 empty = barn_get_empty_sheaf(barn); 6405 if (!empty) 6406 goto no_empty; 6407 6408 pcs->spare = pcs->main; 6409 pcs->main = empty; 6410 goto do_free; 6411 } 6412 6413 if (pcs->spare->size < s->sheaf_capacity) { 6414 swap(pcs->main, pcs->spare); 6415 goto do_free; 6416 } 6417 6418 empty = barn_replace_full_sheaf(barn, pcs->main); 6419 if (IS_ERR(empty)) { 6420 stat(s, BARN_PUT_FAIL); 6421 goto no_empty; 6422 } 6423 6424 stat(s, BARN_PUT); 6425 pcs->main = empty; 6426 6427 do_free: 6428 main = pcs->main; 6429 batch = min(size, s->sheaf_capacity - main->size); 6430 6431 memcpy(main->objects + main->size, p, batch * sizeof(void *)); 6432 main->size += batch; 6433 6434 local_unlock(&s->cpu_sheaves->lock); 6435 6436 stat_add(s, FREE_PCS, batch); 6437 6438 if (batch < size) { 6439 p += batch; 6440 size -= batch; 6441 goto next_batch; 6442 } 6443 6444 if (remote_nr) 6445 goto flush_remote; 6446 6447 return; 6448 6449 no_empty: 6450 local_unlock(&s->cpu_sheaves->lock); 6451 6452 /* 6453 * if we depleted all empty sheaves in the barn or there are too 6454 * many full sheaves, free the rest to slab pages 6455 */ 6456 fallback: 6457 __kmem_cache_free_bulk(s, size, p); 6458 6459 flush_remote: 6460 if (remote_nr) { 6461 __kmem_cache_free_bulk(s, remote_nr, &remote_objects[0]); 6462 if (i < size) { 6463 remote_nr = 0; 6464 goto next_remote_batch; 6465 } 6466 } 6467 } 6468 6469 struct defer_free { 6470 struct llist_head objects; 6471 struct llist_head slabs; 6472 struct irq_work work; 6473 }; 6474 6475 static void free_deferred_objects(struct irq_work *work); 6476 6477 static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = { 6478 .objects = LLIST_HEAD_INIT(objects), 6479 .slabs = LLIST_HEAD_INIT(slabs), 6480 .work = IRQ_WORK_INIT(free_deferred_objects), 6481 }; 6482 6483 /* 6484 * In PREEMPT_RT irq_work runs in per-cpu kthread, so it's safe 6485 * to take sleeping spin_locks from __slab_free() and deactivate_slab(). 6486 * In !PREEMPT_RT irq_work will run after local_unlock_irqrestore(). 
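 *
 * The deferral flow, as sketched from the helpers below:
 *
 *	defer_free() / defer_deactivate_slab()
 *		llist_add() the object or slab and irq_work_queue() the
 *		per-cpu work
 *	free_deferred_objects()
 *		later drains df->objects via __slab_free() and df->slabs
 *		via deactivate_slab() or free_slab()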
6487 */
6488 static void free_deferred_objects(struct irq_work *work)
6489 {
6490 struct defer_free *df = container_of(work, struct defer_free, work);
6491 struct llist_head *objs = &df->objects;
6492 struct llist_head *slabs = &df->slabs;
6493 struct llist_node *llnode, *pos, *t;
6494
6495 if (llist_empty(objs) && llist_empty(slabs))
6496 return;
6497
6498 llnode = llist_del_all(objs);
6499 llist_for_each_safe(pos, t, llnode) {
6500 struct kmem_cache *s;
6501 struct slab *slab;
6502 void *x = pos;
6503
6504 slab = virt_to_slab(x);
6505 s = slab->slab_cache;
6506
6507 /* Point 'x' back to the beginning of the allocated object */
6508 x -= s->offset;
6509
6510 /*
6511 * We used the freepointer in 'x' to link 'x' into df->objects.
6512 * Clear it to NULL to avoid false positive detection
6513 * of "Freepointer corruption".
6514 */
6515 set_freepointer(s, x, NULL);
6516
6517 __slab_free(s, slab, x, x, 1, _THIS_IP_);
6518 }
6519
6520 llnode = llist_del_all(slabs);
6521 llist_for_each_safe(pos, t, llnode) {
6522 struct slab *slab = container_of(pos, struct slab, llnode);
6523
6524 if (slab->frozen)
6525 deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
6526 else
6527 free_slab(slab->slab_cache, slab);
6528 }
6529 }
6530
6531 static void defer_free(struct kmem_cache *s, void *head)
6532 {
6533 struct defer_free *df;
6534
6535 guard(preempt)();
6536
6537 df = this_cpu_ptr(&defer_free_objects);
6538 if (llist_add(head + s->offset, &df->objects))
6539 irq_work_queue(&df->work);
6540 }
6541
6542 static void defer_deactivate_slab(struct slab *slab, void *flush_freelist)
6543 {
6544 struct defer_free *df;
6545
6546 slab->flush_freelist = flush_freelist;
6547
6548 guard(preempt)();
6549
6550 df = this_cpu_ptr(&defer_free_objects);
6551 if (llist_add(&slab->llnode, &df->slabs))
6552 irq_work_queue(&df->work);
6553 }
6554
6555 void defer_free_barrier(void)
6556 {
6557 int cpu;
6558
6559 for_each_possible_cpu(cpu)
6560 irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
6561 }
6562
6563 /*
6564 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
6565 * can perform fastpath freeing without additional function calls.
6566 *
6567 * The fastpath is only possible if we are freeing to the current cpu slab
6568 * of this processor. This is typically the case if we have just allocated
6569 * the item before.
6570 *
6571 * If fastpath is not possible then fall back to __slab_free where we deal
6572 * with all sorts of special processing.
6573 *
6574 * Bulk free of a freelist with several objects (all pointing to the
6575 * same slab) is possible by specifying head and tail pointers, plus the
6576 * object count (cnt). Bulk free is indicated by the tail pointer being set.
6577 */
6578 static __always_inline void do_slab_free(struct kmem_cache *s,
6579 struct slab *slab, void *head, void *tail,
6580 int cnt, unsigned long addr)
6581 {
6582 /* cnt == 0 signals that it's called from kfree_nolock() */
6583 bool allow_spin = cnt;
6584 struct kmem_cache_cpu *c;
6585 unsigned long tid;
6586 void **freelist;
6587
6588 redo:
6589 /*
6590 * Determine the current cpu's per cpu slab.
6591 * The cpu may change afterward. However that does not matter since
6592 * data is retrieved via this pointer. If we are on the same cpu
6593 * during the cmpxchg then the free will succeed.
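 *
 * An illustrative interleaving that the tid catches:
 *
 *	tid = READ_ONCE(c->tid);
 *	... we are migrated, or an irq allocates/frees on this cpu,
 *	    bumping c->tid via next_tid() ...
 *	the (freelist, tid) cmpxchg below fails and we simply redo.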
6594 */
6595 c = raw_cpu_ptr(s->cpu_slab);
6596 tid = READ_ONCE(c->tid);
6597
6598 /* Same as the comment on barrier() in __slab_alloc_node() */
6599 barrier();
6600
6601 if (unlikely(slab != c->slab)) {
6602 if (unlikely(!allow_spin)) {
6603 /*
6604 * __slab_free() can locklessly cmpxchg16 into a slab,
6605 * but then it might need to take spin_lock or local_lock
6606 * in put_cpu_partial() for further processing.
6607 * Avoid the complexity and simply add to a deferred list.
6608 */
6609 defer_free(s, head);
6610 } else {
6611 __slab_free(s, slab, head, tail, cnt, addr);
6612 }
6613 return;
6614 }
6615
6616 if (unlikely(!allow_spin)) {
6617 if ((in_nmi() || !USE_LOCKLESS_FAST_PATH()) &&
6618 local_lock_is_locked(&s->cpu_slab->lock)) {
6619 defer_free(s, head);
6620 return;
6621 }
6622 cnt = 1; /* restore cnt. kfree_nolock() frees one object at a time */
6623 }
6624
6625 if (USE_LOCKLESS_FAST_PATH()) {
6626 freelist = READ_ONCE(c->freelist);
6627
6628 set_freepointer(s, tail, freelist);
6629
6630 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
6631 note_cmpxchg_failure("slab_free", s, tid);
6632 goto redo;
6633 }
6634 } else {
6635 __maybe_unused unsigned long flags = 0;
6636
6637 /* Update the free list under the local lock */
6638 local_lock_cpu_slab(s, flags);
6639 c = this_cpu_ptr(s->cpu_slab);
6640 if (unlikely(slab != c->slab)) {
6641 local_unlock_cpu_slab(s, flags);
6642 goto redo;
6643 }
6644 tid = c->tid;
6645 freelist = c->freelist;
6646
6647 set_freepointer(s, tail, freelist);
6648 c->freelist = head;
6649 c->tid = next_tid(tid);
6650
6651 local_unlock_cpu_slab(s, flags);
6652 }
6653 stat_add(s, FREE_FASTPATH, cnt);
6654 }
6655
6656 static __fastpath_inline
6657 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
6658 unsigned long addr)
6659 {
6660 memcg_slab_free_hook(s, slab, &object, 1);
6661 alloc_tagging_slab_free_hook(s, slab, &object, 1);
6662
6663 if (unlikely(!slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6664 return;
6665
6666 if (s->cpu_sheaves && likely(!IS_ENABLED(CONFIG_NUMA) ||
6667 slab_nid(slab) == numa_mem_id())
6668 && likely(!slab_test_pfmemalloc(slab))) {
6669 if (likely(free_to_pcs(s, object)))
6670 return;
6671 }
6672
6673 do_slab_free(s, slab, object, object, 1, addr);
6674 }
6675
6676 #ifdef CONFIG_MEMCG
6677 /* Do not inline the rare memcg charging failed path into the allocation path */
6678 static noinline
6679 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
6680 {
6681 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
6682 do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
6683 }
6684 #endif
6685
6686 static __fastpath_inline
6687 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
6688 void *tail, void **p, int cnt, unsigned long addr)
6689 {
6690 memcg_slab_free_hook(s, slab, p, cnt);
6691 alloc_tagging_slab_free_hook(s, slab, p, cnt);
6692 /*
6693 * With KASAN enabled slab_free_freelist_hook modifies the freelist
6694 * to remove objects whose reuse must be delayed.
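 *
 * If the reuse of every object in the list must be delayed, the hook
 * returns false and there is nothing left for do_slab_free() to do.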
6695 */ 6696 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) 6697 do_slab_free(s, slab, head, tail, cnt, addr); 6698 } 6699 6700 #ifdef CONFIG_SLUB_RCU_DEBUG 6701 static void slab_free_after_rcu_debug(struct rcu_head *rcu_head) 6702 { 6703 struct rcu_delayed_free *delayed_free = 6704 container_of(rcu_head, struct rcu_delayed_free, head); 6705 void *object = delayed_free->object; 6706 struct slab *slab = virt_to_slab(object); 6707 struct kmem_cache *s; 6708 6709 kfree(delayed_free); 6710 6711 if (WARN_ON(is_kfence_address(object))) 6712 return; 6713 6714 /* find the object and the cache again */ 6715 if (WARN_ON(!slab)) 6716 return; 6717 s = slab->slab_cache; 6718 if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU))) 6719 return; 6720 6721 /* resume freeing */ 6722 if (slab_free_hook(s, object, slab_want_init_on_free(s), true)) 6723 do_slab_free(s, slab, object, object, 1, _THIS_IP_); 6724 } 6725 #endif /* CONFIG_SLUB_RCU_DEBUG */ 6726 6727 #ifdef CONFIG_KASAN_GENERIC 6728 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 6729 { 6730 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr); 6731 } 6732 #endif 6733 6734 static inline struct kmem_cache *virt_to_cache(const void *obj) 6735 { 6736 struct slab *slab; 6737 6738 slab = virt_to_slab(obj); 6739 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) 6740 return NULL; 6741 return slab->slab_cache; 6742 } 6743 6744 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) 6745 { 6746 struct kmem_cache *cachep; 6747 6748 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && 6749 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) 6750 return s; 6751 6752 cachep = virt_to_cache(x); 6753 if (WARN(cachep && cachep != s, 6754 "%s: Wrong slab cache. %s but object is from %s\n", 6755 __func__, s->name, cachep->name)) 6756 print_tracking(cachep, x); 6757 return cachep; 6758 } 6759 6760 /** 6761 * kmem_cache_free - Deallocate an object 6762 * @s: The cache the allocation was from. 6763 * @x: The previously allocated object. 6764 * 6765 * Free an object which was previously allocated from this 6766 * cache. 6767 */ 6768 void kmem_cache_free(struct kmem_cache *s, void *x) 6769 { 6770 s = cache_from_obj(s, x); 6771 if (!s) 6772 return; 6773 trace_kmem_cache_free(_RET_IP_, x, s); 6774 slab_free(s, virt_to_slab(x), x, _RET_IP_); 6775 } 6776 EXPORT_SYMBOL(kmem_cache_free); 6777 6778 static void free_large_kmalloc(struct page *page, void *object) 6779 { 6780 unsigned int order = compound_order(page); 6781 6782 if (WARN_ON_ONCE(!PageLargeKmalloc(page))) { 6783 dump_page(page, "Not a kmalloc allocation"); 6784 return; 6785 } 6786 6787 if (WARN_ON_ONCE(order == 0)) 6788 pr_warn_once("object pointer: 0x%p\n", object); 6789 6790 kmemleak_free(object); 6791 kasan_kfree_large(object); 6792 kmsan_kfree_large(object); 6793 6794 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, 6795 -(PAGE_SIZE << order)); 6796 __ClearPageLargeKmalloc(page); 6797 free_frozen_pages(page, order); 6798 } 6799 6800 /* 6801 * Given an rcu_head embedded within an object obtained from kvmalloc at an 6802 * offset < 4k, free the object in question. 
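 *
 * An illustrative user (the struct is hypothetical; kvfree_rcu() is the
 * usual entry point that eventually hands the rcu_head to this callback):
 *
 *	struct foo {
 *		long payload;
 *		struct rcu_head rcu;	(offset < 4k into the object)
 *	};
 *
 *	kvfree_rcu(p, rcu);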
6803 */
6804 void kvfree_rcu_cb(struct rcu_head *head)
6805 {
6806 void *obj = head;
6807 struct page *page;
6808 struct slab *slab;
6809 struct kmem_cache *s;
6810 void *slab_addr;
6811
6812 if (is_vmalloc_addr(obj)) {
6813 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6814 vfree(obj);
6815 return;
6816 }
6817
6818 page = virt_to_page(obj);
6819 slab = page_slab(page);
6820 if (!slab) {
6821 /*
6822 * The rcu_head offset can only be less than the page size, so
6823 * there is no need to consider the allocation order.
6824 */
6825 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
6826 free_large_kmalloc(page, obj);
6827 return;
6828 }
6829
6830 s = slab->slab_cache;
6831 slab_addr = slab_address(slab);
6832
6833 if (is_kfence_address(obj)) {
6834 obj = kfence_object_start(obj);
6835 } else {
6836 unsigned int idx = __obj_to_index(s, slab_addr, obj);
6837
6838 obj = slab_addr + s->size * idx;
6839 obj = fixup_red_left(s, obj);
6840 }
6841
6842 slab_free(s, slab, obj, _RET_IP_);
6843 }
6844
6845 /**
6846 * kfree - free previously allocated memory
6847 * @object: pointer returned by kmalloc() or kmem_cache_alloc()
6848 *
6849 * If @object is NULL, no operation is performed.
6850 */
6851 void kfree(const void *object)
6852 {
6853 struct page *page;
6854 struct slab *slab;
6855 struct kmem_cache *s;
6856 void *x = (void *)object;
6857
6858 trace_kfree(_RET_IP_, object);
6859
6860 if (unlikely(ZERO_OR_NULL_PTR(object)))
6861 return;
6862
6863 page = virt_to_page(object);
6864 slab = page_slab(page);
6865 if (!slab) {
6866 free_large_kmalloc(page, (void *)object);
6867 return;
6868 }
6869
6870 s = slab->slab_cache;
6871 slab_free(s, slab, x, _RET_IP_);
6872 }
6873 EXPORT_SYMBOL(kfree);
6874
6875 /*
6876 * Can be called while holding a raw_spinlock_t or from IRQ and NMI,
6877 * but ONLY for objects allocated by kmalloc_nolock().
6878 * Debug checks (like kmemleak and kfence) were skipped on allocation,
6879 * hence
6880 * obj = kmalloc(); kfree_nolock(obj);
6881 * will miss kmemleak/kfence bookkeeping and will cause false positives.
6882 * large_kmalloc is not supported either.
6883 */
6884 void kfree_nolock(const void *object)
6885 {
6886 struct slab *slab;
6887 struct kmem_cache *s;
6888 void *x = (void *)object;
6889
6890 if (unlikely(ZERO_OR_NULL_PTR(object)))
6891 return;
6892
6893 slab = virt_to_slab(object);
6894 if (unlikely(!slab)) {
6895 WARN_ONCE(1, "large_kmalloc is not supported by kfree_nolock()");
6896 return;
6897 }
6898
6899 s = slab->slab_cache;
6900
6901 memcg_slab_free_hook(s, slab, &x, 1);
6902 alloc_tagging_slab_free_hook(s, slab, &x, 1);
6903 /*
6904 * Unlike slab_free() do NOT call the following:
6905 * kmemleak_free_recursive(x, s->flags);
6906 * debug_check_no_locks_freed(x, s->object_size);
6907 * debug_check_no_obj_freed(x, s->object_size);
6908 * __kcsan_check_access(x, s->object_size, ..);
6909 * kfence_free(x);
6910 * since they take spinlocks or are not safe to call from any context.
6911 */
6912 kmsan_slab_free(s, x);
6913 /*
6914 * If KASAN finds a kernel bug it will do kasan_report_invalid_free()
6915 * which will call raw_spin_lock_irqsave() which is technically
6916 * unsafe from NMI, but take the chance and report the kernel bug.
6917 * The sequence of
6918 * kasan_report_invalid_free() -> raw_spin_lock_irqsave() -> NMI
6919 * -> kfree_nolock() -> kasan_report_invalid_free() on the same CPU
6920 * is double buggy and deserves to deadlock.
6921 */
6922 if (kasan_slab_pre_free(s, x))
6923 return;
6924 /*
6925 * memcg, kasan_slab_pre_free are done for 'x'.
* The only thing left is kasan_poison without quarantine,
6927 * since the kasan quarantine takes locks and is not supported from NMI.
6928 */
6929 kasan_slab_free(s, x, false, false, /* skip quarantine */true);
6930 do_slab_free(s, slab, x, x, 0, _RET_IP_);
6931 }
6932 EXPORT_SYMBOL_GPL(kfree_nolock);
6933
6934 static __always_inline __realloc_size(2) void *
6935 __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags, int nid)
6936 {
6937 void *ret;
6938 size_t ks = 0;
6939 int orig_size = 0;
6940 struct kmem_cache *s = NULL;
6941
6942 if (unlikely(ZERO_OR_NULL_PTR(p)))
6943 goto alloc_new;
6944
6945 /* Check for double-free. */
6946 if (!kasan_check_byte(p))
6947 return NULL;
6948
6949 /*
6950 * If reallocation is not necessary (e.g. the new size is less
6951 * than the current allocated size), the current allocation will be
6952 * preserved unless __GFP_THISNODE is set. In the latter case a new
6953 * allocation on the requested node will be attempted.
6954 */
6955 if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
6956 nid != page_to_nid(virt_to_page(p)))
6957 goto alloc_new;
6958
6959 if (is_kfence_address(p)) {
6960 ks = orig_size = kfence_ksize(p);
6961 } else {
6962 struct page *page = virt_to_page(p);
6963 struct slab *slab = page_slab(page);
6964
6965 if (!slab) {
6966 /* Big kmalloc object */
6967 ks = page_size(page);
6968 WARN_ON(ks <= KMALLOC_MAX_CACHE_SIZE);
6969 WARN_ON(p != page_address(page));
6970 } else {
6971 s = slab->slab_cache;
6972 orig_size = get_orig_size(s, (void *)p);
6973 ks = s->object_size;
6974 }
6975 }
6976
6977 /* If the old object doesn't fit, allocate a bigger one */
6978 if (new_size > ks)
6979 goto alloc_new;
6980
6981 /* If the old object doesn't satisfy the new alignment, allocate a new one */
6982 if (!IS_ALIGNED((unsigned long)p, align))
6983 goto alloc_new;
6984
6985 /* Zero out spare memory. */
6986 if (want_init_on_alloc(flags)) {
6987 kasan_disable_current();
6988 if (orig_size && orig_size < new_size)
6989 memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size);
6990 else
6991 memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
6992 kasan_enable_current();
6993 }
6994
6995 /* Set up the kmalloc redzone when needed */
6996 if (s && slub_debug_orig_size(s)) {
6997 set_orig_size(s, (void *)p, new_size);
6998 if (s->flags & SLAB_RED_ZONE && new_size < ks)
6999 memset_no_sanitize_memory(kasan_reset_tag(p) + new_size,
7000 SLUB_RED_ACTIVE, ks - new_size);
7001 }
7002
7003 p = kasan_krealloc(p, new_size, flags);
7004 return (void *)p;
7005
7006 alloc_new:
7007 ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
7008 if (ret && p) {
7009 /* Disable KASAN checks as the object's redzone is accessed. */
7010 kasan_disable_current();
7011 memcpy(ret, kasan_reset_tag(p), orig_size ?: ks);
7012 kasan_enable_current();
7013 }
7014
7015 return ret;
7016 }
7017
7018 /**
7019 * krealloc_node_align - reallocate memory. The contents will remain unchanged.
7020 * @p: object to reallocate memory for.
7021 * @new_size: how many bytes of memory are required.
7022 * @align: desired alignment.
7023 * @flags: the type of memory to allocate.
7024 * @nid: NUMA node or NUMA_NO_NODE
7025 *
7026 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
7027 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
7028 *
7029 * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
7030 * Documentation/core-api/memory-allocation.rst for more details.
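 *
 * A typical (illustrative) calling pattern via the krealloc() wrapper;
 * note that on failure the original buffer is untouched and still owned
 * by the caller:
 *
 *	tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;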
7031 *
7032 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
7033 * initial memory allocation, every subsequent call to this API for the same
7034 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
7035 * __GFP_ZERO is not fully honored by this API.
7036 *
7037 * When slub_debug_orig_size() is off, krealloc() only knows about the bucket
7038 * size of an allocation (but not the exact size it was allocated with) and
7039 * hence implements the following semantics for shrinking and growing buffers
7040 * with __GFP_ZERO::
7041 *
7042 * new bucket
7043 * 0 size size
7044 * |--------|----------------|
7045 * | keep | zero |
7046 *
7047 * Otherwise, the original allocation size 'orig_size' could be used to
7048 * precisely clear the requested size, and the new size will also be stored
7049 * as the new 'orig_size'.
7050 *
7051 * In any case, the contents of the object pointed to are preserved up to the
7052 * lesser of the new and old sizes.
7053 *
7054 * Return: pointer to the allocated memory or %NULL in case of error
7055 */
7056 void *krealloc_node_align_noprof(const void *p, size_t new_size, unsigned long align,
7057 gfp_t flags, int nid)
7058 {
7059 void *ret;
7060
7061 if (unlikely(!new_size)) {
7062 kfree(p);
7063 return ZERO_SIZE_PTR;
7064 }
7065
7066 ret = __do_krealloc(p, new_size, align, flags, nid);
7067 if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
7068 kfree(p);
7069
7070 return ret;
7071 }
7072 EXPORT_SYMBOL(krealloc_node_align_noprof);
7073
7074 static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
7075 {
7076 /*
7077 * We want to attempt a large physically contiguous block first because
7078 * it is less likely to fragment multiple larger blocks and therefore
7079 * contributes less to long term fragmentation than the vmalloc fallback.
7080 * However make sure that larger requests are not too disruptive - i.e.
7081 * do not direct reclaim unless physically contiguous memory is preferred
7082 * (__GFP_RETRY_MAYFAIL mode). We still kick in kswapd/kcompactd to
7083 * start working in the background.
7084 */
7085 if (size > PAGE_SIZE) {
7086 flags |= __GFP_NOWARN;
7087
7088 if (!(flags & __GFP_RETRY_MAYFAIL))
7089 flags &= ~__GFP_DIRECT_RECLAIM;
7090
7091 /* nofail semantic is implemented by the vmalloc fallback */
7092 flags &= ~__GFP_NOFAIL;
7093 }
7094
7095 return flags;
7096 }
7097
7098 /**
7099 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
7100 * failure, fall back to non-contiguous (vmalloc) allocation.
7101 * @size: size of the request.
7102 * @b: which set of kmalloc buckets to allocate from.
7103 * @align: desired alignment.
7104 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
7105 * @node: numa node to allocate from
7106 *
7107 * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
7108 * Documentation/core-api/memory-allocation.rst for more details.
7109 *
7110 * Uses kmalloc to get the memory but if the allocation fails then falls back
7111 * to the vmalloc allocator. Use kvfree for freeing the memory.
7112 *
7113 * GFP_NOWAIT and GFP_ATOMIC are not supported, nor is the __GFP_NORETRY modifier.
7114 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
7115 * preferable to the vmalloc fallback, due to visible performance drawbacks.
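 *
 * Typical usage via the kvmalloc()/kvmalloc_array() wrappers (illustrative):
 *
 *	table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);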
7116 *
7117 * Return: pointer to the allocated memory or %NULL in case of failure
7118 */
7119 void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
7120 gfp_t flags, int node)
7121 {
7122 void *ret;
7123
7124 /*
7125 * It doesn't really make sense to fall back to vmalloc for sub-page
7126 * requests
7127 */
7128 ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),
7129 kmalloc_gfp_adjust(flags, size),
7130 node, _RET_IP_);
7131 if (ret || size <= PAGE_SIZE)
7132 return ret;
7133
7134 /* non-sleeping allocations are not supported by vmalloc */
7135 if (!gfpflags_allow_blocking(flags))
7136 return NULL;
7137
7138 /* Don't even allow crazy sizes */
7139 if (unlikely(size > INT_MAX)) {
7140 WARN_ON_ONCE(!(flags & __GFP_NOWARN));
7141 return NULL;
7142 }
7143
7144 /*
7145 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
7146 * since the callers already cannot assume anything
7147 * about the resulting pointer, and cannot play
7148 * protection games.
7149 */
7150 return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
7151 flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
7152 node, __builtin_return_address(0));
7153 }
7154 EXPORT_SYMBOL(__kvmalloc_node_noprof);
7155
7156 /**
7157 * kvfree() - Free memory.
7158 * @addr: Pointer to allocated memory.
7159 *
7160 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
7161 * It is slightly more efficient to use kfree() or vfree() if you are certain
7162 * that you know which one to use.
7163 *
7164 * Context: Either preemptible task context or not-NMI interrupt.
7165 */
7166 void kvfree(const void *addr)
7167 {
7168 if (is_vmalloc_addr(addr))
7169 vfree(addr);
7170 else
7171 kfree(addr);
7172 }
7173 EXPORT_SYMBOL(kvfree);
7174
7175 /**
7176 * kvfree_sensitive - Free a data object containing sensitive information.
7177 * @addr: address of the data object to be freed.
7178 * @len: length of the data object.
7179 *
7180 * Use the special memzero_explicit() function to clear the content of a
7181 * kvmalloc'ed object containing sensitive data to make sure that the
7182 * compiler won't optimize out the data clearing.
7183 */
7184 void kvfree_sensitive(const void *addr, size_t len)
7185 {
7186 if (likely(!ZERO_OR_NULL_PTR(addr))) {
7187 memzero_explicit((void *)addr, len);
7188 kvfree(addr);
7189 }
7190 }
7191 EXPORT_SYMBOL(kvfree_sensitive);
7192
7193 /**
7194 * kvrealloc_node_align - reallocate memory; contents remain unchanged
7195 * @p: object to reallocate memory for
7196 * @size: the size to reallocate
7197 * @align: desired alignment
7198 * @flags: the flags for the page level allocator
7199 * @nid: NUMA node id
7200 *
7201 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
7202 * and @p is not a %NULL pointer, the object pointed to is freed.
7203 *
7204 * Only alignments up to those guaranteed by kmalloc() will be honored. Please see
7205 * Documentation/core-api/memory-allocation.rst for more details.
7206 *
7207 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
7208 * initial memory allocation, every subsequent call to this API for the same
7209 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
7210 * __GFP_ZERO is not fully honored by this API.
7211 *
7212 * In any case, the contents of the object pointed to are preserved up to the
7213 * lesser of the new and old sizes.
7214 *
7215 * This function must not be called concurrently with itself or kvfree() for the
7216 * same memory allocation.
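 *
 * Illustrative use via the kvrealloc() wrapper; as with krealloc(), the
 * old allocation remains valid when %NULL is returned:
 *
 *	tmp = kvrealloc(p, new_size, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	p = tmp;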
7217 *
7218 * Return: pointer to the allocated memory or %NULL in case of error
7219 */
7220 void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
7221 gfp_t flags, int nid)
7222 {
7223 void *n;
7224
7225 if (is_vmalloc_addr(p))
7226 return vrealloc_node_align_noprof(p, size, align, flags, nid);
7227
7228 n = krealloc_node_align_noprof(p, size, align, kmalloc_gfp_adjust(flags, size), nid);
7229 if (!n) {
7230 /* We failed to krealloc(), fall back to kvmalloc(). */
7231 n = kvmalloc_node_align_noprof(size, align, flags, nid);
7232 if (!n)
7233 return NULL;
7234
7235 if (p) {
7236 /* We already know that `p` is not a vmalloc address. */
7237 kasan_disable_current();
7238 memcpy(n, kasan_reset_tag(p), ksize(p));
7239 kasan_enable_current();
7240
7241 kfree(p);
7242 }
7243 }
7244
7245 return n;
7246 }
7247 EXPORT_SYMBOL(kvrealloc_node_align_noprof);
7248
7249 struct detached_freelist {
7250 struct slab *slab;
7251 void *tail;
7252 void *freelist;
7253 int cnt;
7254 struct kmem_cache *s;
7255 };
7256
7257 /*
7258 * This function progressively scans the array with free objects (with
7259 * a limited look ahead) and extracts objects belonging to the same
7260 * slab. It builds a detached freelist directly within the given
7261 * slab/objects. This can happen without any need for
7262 * synchronization, because the objects are owned by the running process.
7263 * The freelist is built up as a singly linked list in the objects.
7264 * The idea is that this detached freelist can then be bulk
7265 * transferred to the real freelist(s), but only requiring a single
7266 * synchronization primitive. Look ahead in the array is limited due
7267 * to performance reasons.
7268 */
7269 static inline
7270 int build_detached_freelist(struct kmem_cache *s, size_t size,
7271 void **p, struct detached_freelist *df)
7272 {
7273 int lookahead = 3;
7274 void *object;
7275 struct page *page;
7276 struct slab *slab;
7277 size_t same;
7278
7279 object = p[--size];
7280 page = virt_to_page(object);
7281 slab = page_slab(page);
7282 if (!s) {
7283 /* Handle kmalloc'ed objects */
7284 if (!slab) {
7285 free_large_kmalloc(page, object);
7286 df->slab = NULL;
7287 return size;
7288 }
7289 /* Derive kmem_cache from object */
7290 df->slab = slab;
7291 df->s = slab->slab_cache;
7292 } else {
7293 df->slab = slab;
7294 df->s = cache_from_obj(s, object); /* Support for memcg */
7295 }
7296
7297 /* Start new detached freelist */
7298 df->tail = object;
7299 df->freelist = object;
7300 df->cnt = 1;
7301
7302 if (is_kfence_address(object))
7303 return size;
7304
7305 set_freepointer(df->s, object, NULL);
7306
7307 same = size;
7308 while (size) {
7309 object = p[--size];
7310 /* df->slab is always set at this point */
7311 if (df->slab == virt_to_slab(object)) {
7312 /* Opportunistically build the freelist */
7313 set_freepointer(df->s, object, df->freelist);
7314 df->freelist = object;
7315 df->cnt++;
7316 same--;
7317 if (size != same)
7318 swap(p[size], p[same]);
7319 continue;
7320 }
7321
7322 /* Limit look ahead search */
7323 if (!--lookahead)
7324 break;
7325 }
7326
7327 return same;
7328 }
7329
7330 /*
7331 * Internal bulk free of objects that were not initialised by the post alloc
7332 * hooks and thus should not be processed by the free hooks
7333 */
7334 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
7335 {
7336 if (!size)
7337 return;
7338
7339 do {
7340 struct detached_freelist df;
7341
7342 size = build_detached_freelist(s, size, p, &df);
7343 if (!df.slab)
7344
continue;
7345
7346 if (kfence_free(df.freelist))
7347 continue;
7348
7349 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
7350 _RET_IP_);
7351 } while (likely(size));
7352 }
7353
7354 /* Note that interrupts must be enabled when calling this function. */
7355 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
7356 {
7357 if (!size)
7358 return;
7359
7360 /*
7361 * Freeing to sheaves is so incompatible with the detached freelist
7362 * that once we go that way, we have to do everything differently.
7363 */
7364 if (s && s->cpu_sheaves) {
7365 free_to_pcs_bulk(s, size, p);
7366 return;
7367 }
7368
7369 do {
7370 struct detached_freelist df;
7371
7372 size = build_detached_freelist(s, size, p, &df);
7373 if (!df.slab)
7374 continue;
7375
7376 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
7377 df.cnt, _RET_IP_);
7378 } while (likely(size));
7379 }
7380 EXPORT_SYMBOL(kmem_cache_free_bulk);
7381
7382 static inline
7383 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
7384 void **p)
7385 {
7386 struct kmem_cache_cpu *c;
7387 unsigned long irqflags;
7388 int i;
7389
7390 /*
7391 * Drain objects in the per cpu slab, while disabling local
7392 * IRQs, which protects against PREEMPT and interrupt
7393 * handlers invoking the normal fastpath.
7394 */
7395 c = slub_get_cpu_ptr(s->cpu_slab);
7396 local_lock_irqsave(&s->cpu_slab->lock, irqflags);
7397
7398 for (i = 0; i < size; i++) {
7399 void *object = c->freelist;
7400
7401 if (unlikely(!object)) {
7402 /*
7403 * We may have removed an object from c->freelist using
7404 * the fastpath in the previous iteration; in that case,
7405 * c->tid has not been bumped yet.
7406 * Since ___slab_alloc() may reenable interrupts while
7407 * allocating memory, we should bump c->tid now.
7408 */
7409 c->tid = next_tid(c->tid);
7410
7411 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
7412
7413 /*
7414 * Invoking the slow path likely has the side effect of
7415 * re-populating the per CPU c->freelist.
7416 */
7417 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
7418 _RET_IP_, c, s->object_size);
7419 if (unlikely(!p[i]))
7420 goto error;
7421
7422 c = this_cpu_ptr(s->cpu_slab);
7423 maybe_wipe_obj_freeptr(s, p[i]);
7424
7425 local_lock_irqsave(&s->cpu_slab->lock, irqflags);
7426
7427 continue; /* goto for-loop */
7428 }
7429 c->freelist = get_freepointer(s, object);
7430 p[i] = object;
7431 maybe_wipe_obj_freeptr(s, p[i]);
7432 stat(s, ALLOC_FASTPATH);
7433 }
7434 c->tid = next_tid(c->tid);
7435 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
7436 slub_put_cpu_ptr(s->cpu_slab);
7437
7438 return i;
7439
7440 error:
7441 slub_put_cpu_ptr(s->cpu_slab);
7442 __kmem_cache_free_bulk(s, i, p);
7443 return 0;
7444
7445 }
7446
7447 /* Note that interrupts must be enabled when calling this function.
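 *
 * The return value is all-or-nothing: either 0 or the full size.
 * Illustrative usage:
 *
 *	void *objs[8];
 *
 *	if (!kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);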
*/
7448 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
7449 void **p)
7450 {
7451 unsigned int i = 0;
7452 void *kfence_obj;
7453
7454 if (!size)
7455 return 0;
7456
7457 s = slab_pre_alloc_hook(s, flags);
7458 if (unlikely(!s))
7459 return 0;
7460
7461 /*
7462 * to make things simpler, only assume at most one kfence allocated
7463 * object per bulk allocation and choose its index randomly
7464 */
7465 kfence_obj = kfence_alloc(s, s->object_size, flags);
7466
7467 if (unlikely(kfence_obj)) {
7468 if (unlikely(size == 1)) {
7469 p[0] = kfence_obj;
7470 goto out;
7471 }
7472 size--;
7473 }
7474
7475 if (s->cpu_sheaves)
7476 i = alloc_from_pcs_bulk(s, size, p);
7477
7478 if (i < size) {
7479 /*
7480 * If we ran out of memory, don't bother with freeing back to
7481 * the percpu sheaves, we have bigger problems.
7482 */
7483 if (unlikely(__kmem_cache_alloc_bulk(s, flags, size - i, p + i) == 0)) {
7484 if (i > 0)
7485 __kmem_cache_free_bulk(s, i, p);
7486 if (kfence_obj)
7487 __kfence_free(kfence_obj);
7488 return 0;
7489 }
7490 }
7491
7492 if (unlikely(kfence_obj)) {
7493 int idx = get_random_u32_below(size + 1);
7494
7495 if (idx != size)
7496 p[size] = p[idx];
7497 p[idx] = kfence_obj;
7498
7499 size++;
7500 }
7501
7502 out:
7503 /*
7504 * memcg and kmem_cache debug support and memory initialization.
7505 * Done outside of the IRQ disabled fastpath loop.
7506 */
7507 if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
7508 slab_want_init_on_alloc(flags, s), s->object_size))) {
7509 return 0;
7510 }
7511
7512 return size;
7513 }
7514 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
7515
7516 /*
7517 * Object placement in a slab is made very easy because we always start at
7518 * offset 0. If we tune the size of the object to the alignment then we can
7519 * get the required alignment by putting one properly sized object after
7520 * another.
7521 *
7522 * Notice that the allocation order determines the sizes of the per cpu
7523 * caches. Each processor always has one slab available for allocations.
7524 * Increasing the allocation order reduces the number of times that slabs
7525 * must be moved on and off the partial lists and is therefore a factor in
7526 * locking overhead.
7527 */
7528
7529 /*
7530 * Minimum / Maximum order of slab pages. This influences locking overhead
7531 * and slab fragmentation. A higher order reduces the number of partial slabs
7532 * and increases the number of allocations possible without having to
7533 * take the list_lock.
7534 */
7535 static unsigned int slub_min_order;
7536 static unsigned int slub_max_order =
7537 IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
7538 static unsigned int slub_min_objects;
7539
7540 /*
7541 * Calculate the order of allocation given a slab object size.
7542 *
7543 * The order of allocation has significant impact on performance and other
7544 * system components. Generally order 0 allocations should be preferred since
7545 * order 0 does not cause fragmentation in the page allocator. Larger objects
7546 * can be problematic to put into order 0 slabs because there may be too much
7547 * unused space left. We go to a higher order if more than 1/16th of the slab
7548 * would be wasted.
7549 *
7550 * In order to reach satisfactory performance we must ensure that a minimum
7551 * number of objects is in one slab. Otherwise we may generate too much
7552 * activity on the partial lists which requires taking the list_lock.
This is
7553 * less of a concern for large slabs, though, which are rarely used.
7554 *
7555 * slab_max_order specifies the order where we begin to stop considering the
7556 * number of objects in a slab as critical. If we reach slab_max_order then
7557 * we try to keep the page order as low as possible. So we accept more waste
7558 * of space in favor of a small page order.
7559 *
7560 * Higher order allocations also allow the placement of more objects in a
7561 * slab and thereby reduce object handling overhead. If the user has
7562 * requested a higher minimum order then we start with that one instead of
7563 * the smallest order which will fit the object.
7564 */
7565 static inline unsigned int calc_slab_order(unsigned int size,
7566 unsigned int min_order, unsigned int max_order,
7567 unsigned int fract_leftover)
7568 {
7569 unsigned int order;
7570
7571 for (order = min_order; order <= max_order; order++) {
7572
7573 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
7574 unsigned int rem;
7575
7576 rem = slab_size % size;
7577
7578 if (rem <= slab_size / fract_leftover)
7579 break;
7580 }
7581
7582 return order;
7583 }
7584
7585 static inline int calculate_order(unsigned int size)
7586 {
7587 unsigned int order;
7588 unsigned int min_objects;
7589 unsigned int max_objects;
7590 unsigned int min_order;
7591
7592 min_objects = slub_min_objects;
7593 if (!min_objects) {
7594 /*
7595 * Some architectures will only update present cpus when
7596 * onlining them, so don't trust the number if it's just 1. But
7597 * we also don't want to use nr_cpu_ids always, as on some other
7598 * architectures, there can be many possible cpus, but never
7599 * onlined. Here we compromise between trying to avoid too high
7600 * order on systems that appear larger than they are, and too
7601 * low order on systems that appear smaller than they are.
7602 */
7603 unsigned int nr_cpus = num_present_cpus();
7604 if (nr_cpus <= 1)
7605 nr_cpus = nr_cpu_ids;
7606 min_objects = 4 * (fls(nr_cpus) + 1);
7607 }
7608 /* min_objects can't be 0 because get_order(0) is undefined */
7609 max_objects = max(order_objects(slub_max_order, size), 1U);
7610 min_objects = min(min_objects, max_objects);
7611
7612 min_order = max_t(unsigned int, slub_min_order,
7613 get_order(min_objects * size));
7614 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
7615 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
7616
7617 /*
7618 * Attempt to find best configuration for a slab. This works by first
7619 * attempting to generate a layout with the best possible configuration
7620 * and backing off gradually.
7621 *
7622 * We start with accepting at most 1/16 waste and try to find the
7623 * smallest order from min_objects-derived/slab_min_order up to
7624 * slab_max_order that will satisfy the constraint. Note that increasing
7625 * the order can only result in same or less fractional waste, not more.
7626 *
7627 * If that fails, we increase the acceptable fraction of waste and try
7628 * again. The last iteration with fraction of 1/2 would effectively
7629 * accept any waste and give us the order determined by min_objects, as
7630 * long as at least a single object fits within slab_max_order.
7631 */
7632 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
7633 order = calc_slab_order(size, min_order, slub_max_order,
7634 fraction);
7635 if (order <= slub_max_order)
7636 return order;
7637 }
7638
7639 /*
7640 * Doh this slab cannot be placed using slab_max_order.
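 *
 * A worked example for the loop above (illustrative, 4K pages,
 * fraction 1/16): for size = 700, order 0 leaves 4096 % 700 = 596
 * bytes unused, more than 4096 / 16 = 256, so it is rejected; order 1
 * leaves 8192 % 700 = 492 <= 8192 / 16 = 512 and is accepted, assuming
 * min_order permits order 1.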
7641 */ 7642 order = get_order(size); 7643 if (order <= MAX_PAGE_ORDER) 7644 return order; 7645 return -ENOSYS; 7646 } 7647 7648 static void 7649 init_kmem_cache_node(struct kmem_cache_node *n, struct node_barn *barn) 7650 { 7651 n->nr_partial = 0; 7652 spin_lock_init(&n->list_lock); 7653 INIT_LIST_HEAD(&n->partial); 7654 #ifdef CONFIG_SLUB_DEBUG 7655 atomic_long_set(&n->nr_slabs, 0); 7656 atomic_long_set(&n->total_objects, 0); 7657 INIT_LIST_HEAD(&n->full); 7658 #endif 7659 n->barn = barn; 7660 if (barn) 7661 barn_init(barn); 7662 } 7663 7664 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 7665 { 7666 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 7667 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * 7668 sizeof(struct kmem_cache_cpu)); 7669 7670 /* 7671 * Must align to double word boundary for the double cmpxchg 7672 * instructions to work; see __pcpu_double_call_return_bool(). 7673 */ 7674 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 7675 2 * sizeof(void *)); 7676 7677 if (!s->cpu_slab) 7678 return 0; 7679 7680 init_kmem_cache_cpus(s); 7681 7682 return 1; 7683 } 7684 7685 static int init_percpu_sheaves(struct kmem_cache *s) 7686 { 7687 int cpu; 7688 7689 for_each_possible_cpu(cpu) { 7690 struct slub_percpu_sheaves *pcs; 7691 7692 pcs = per_cpu_ptr(s->cpu_sheaves, cpu); 7693 7694 local_trylock_init(&pcs->lock); 7695 7696 pcs->main = alloc_empty_sheaf(s, GFP_KERNEL); 7697 7698 if (!pcs->main) 7699 return -ENOMEM; 7700 } 7701 7702 return 0; 7703 } 7704 7705 static struct kmem_cache *kmem_cache_node; 7706 7707 /* 7708 * No kmalloc_node yet so do it by hand. We know that this is the first 7709 * slab on the node for this slabcache. There are no concurrent accesses 7710 * possible. 7711 * 7712 * Note that this function only works on the kmem_cache_node 7713 * when allocating for the kmem_cache_node. This is used for bootstrapping 7714 * memory on a fresh node that has no slab structures yet. 7715 */ 7716 static void early_kmem_cache_node_alloc(int node) 7717 { 7718 struct slab *slab; 7719 struct kmem_cache_node *n; 7720 7721 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 7722 7723 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 7724 7725 BUG_ON(!slab); 7726 if (slab_nid(slab) != node) { 7727 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 7728 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 7729 } 7730 7731 n = slab->freelist; 7732 BUG_ON(!n); 7733 #ifdef CONFIG_SLUB_DEBUG 7734 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 7735 #endif 7736 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 7737 slab->freelist = get_freepointer(kmem_cache_node, n); 7738 slab->inuse = 1; 7739 kmem_cache_node->node[node] = n; 7740 init_kmem_cache_node(n, NULL); 7741 inc_slabs_node(kmem_cache_node, node, slab->objects); 7742 7743 /* 7744 * No locks need to be taken here as it has just been 7745 * initialized and there is no concurrent access. 
7746 */
7747 __add_partial(n, slab, DEACTIVATE_TO_HEAD);
7748 }
7749
7750 static void free_kmem_cache_nodes(struct kmem_cache *s)
7751 {
7752 int node;
7753 struct kmem_cache_node *n;
7754
7755 for_each_kmem_cache_node(s, node, n) {
7756 if (n->barn) {
7757 WARN_ON(n->barn->nr_full);
7758 WARN_ON(n->barn->nr_empty);
7759 kfree(n->barn);
7760 n->barn = NULL;
7761 }
7762
7763 s->node[node] = NULL;
7764 kmem_cache_free(kmem_cache_node, n);
7765 }
7766 }
7767
7768 void __kmem_cache_release(struct kmem_cache *s)
7769 {
7770 cache_random_seq_destroy(s);
7771 if (s->cpu_sheaves)
7772 pcs_destroy(s);
7773 #ifdef CONFIG_PREEMPT_RT
7774 if (s->cpu_slab)
7775 lockdep_unregister_key(&s->lock_key);
7776 #endif
7777 free_percpu(s->cpu_slab);
7778 free_kmem_cache_nodes(s);
7779 }
7780
7781 static int init_kmem_cache_nodes(struct kmem_cache *s)
7782 {
7783 int node;
7784
7785 for_each_node_mask(node, slab_nodes) {
7786 struct kmem_cache_node *n;
7787 struct node_barn *barn = NULL;
7788
7789 if (slab_state == DOWN) {
7790 early_kmem_cache_node_alloc(node);
7791 continue;
7792 }
7793
7794 if (s->cpu_sheaves) {
7795 barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
7796
7797 if (!barn)
7798 return 0;
7799 }
7800
7801 n = kmem_cache_alloc_node(kmem_cache_node,
7802 GFP_KERNEL, node);
7803 if (!n) {
7804 kfree(barn);
7805 return 0;
7806 }
7807
7808 init_kmem_cache_node(n, barn);
7809
7810 s->node[node] = n;
7811 }
7812 return 1;
7813 }
7814
7815 static void set_cpu_partial(struct kmem_cache *s)
7816 {
7817 #ifdef CONFIG_SLUB_CPU_PARTIAL
7818 unsigned int nr_objects;
7819
7820 /*
7821 * cpu_partial determines the maximum number of objects kept in the
7822 * per cpu partial lists of a processor.
7823 *
7824 * Per cpu partial lists mainly contain slabs that just have one
7825 * object freed. If they are used for allocation then they can be
7826 * filled up again with minimal effort. The slab will never hit the
7827 * per node partial lists and therefore no locking will be required.
7828 *
7829 * For backwards compatibility reasons, this is determined as a number
7830 * of objects, even though we now limit the maximum number of pages; see
7831 * slub_set_cpu_partial()
7832 */
7833 if (!kmem_cache_has_cpu_partial(s))
7834 nr_objects = 0;
7835 else if (s->size >= PAGE_SIZE)
7836 nr_objects = 6;
7837 else if (s->size >= 1024)
7838 nr_objects = 24;
7839 else if (s->size >= 256)
7840 nr_objects = 52;
7841 else
7842 nr_objects = 120;
7843
7844 slub_set_cpu_partial(s, nr_objects);
7845 #endif
7846 }
7847
7848 /*
7849 * calculate_sizes() determines the order and the distribution of data within
7850 * a slab object.
7851 */
7852 static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
7853 {
7854 slab_flags_t flags = s->flags;
7855 unsigned int size = s->object_size;
7856 unsigned int order;
7857
7858 /*
7859 * Round up object size to the next word boundary. We can only
7860 * place the free pointer at word boundaries and this determines
7861 * the possible location of the free pointer.
7862 */
7863 size = ALIGN(size, sizeof(void *));
7864
7865 #ifdef CONFIG_SLUB_DEBUG
7866 /*
7867 * Determine if we can poison the object itself. If the user of
7868 * the slab may touch the object after free or before allocation
7869 * then we should never poison the object itself.
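 *
 * For reference, the debug layout assembled by the rest of this
 * function is roughly (each part only present with the respective
 * flag or config):
 *
 *	red_left_pad | object (poisoned) | right redzone or free pointer |
 *	2 x struct track | orig_size | padding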
7870 */ 7871 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 7872 !s->ctor) 7873 s->flags |= __OBJECT_POISON; 7874 else 7875 s->flags &= ~__OBJECT_POISON; 7876 7877 7878 /* 7879 * If we are Redzoning then check if there is some space between the 7880 * end of the object and the free pointer. If not then add an 7881 * additional word to have some bytes to store Redzone information. 7882 */ 7883 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 7884 size += sizeof(void *); 7885 #endif 7886 7887 /* 7888 * With that we have determined the number of bytes in actual use 7889 * by the object and redzoning. 7890 */ 7891 s->inuse = size; 7892 7893 if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) || 7894 (flags & SLAB_POISON) || s->ctor || 7895 ((flags & SLAB_RED_ZONE) && 7896 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) { 7897 /* 7898 * Relocate free pointer after the object if it is not 7899 * permitted to overwrite the first word of the object on 7900 * kmem_cache_free. 7901 * 7902 * This is the case if we do RCU, have a constructor or 7903 * destructor, are poisoning the objects, or are 7904 * redzoning an object smaller than sizeof(void *) or are 7905 * redzoning an object with slub_debug_orig_size() enabled, 7906 * in which case the right redzone may be extended. 7907 * 7908 * The assumption that s->offset >= s->inuse means free 7909 * pointer is outside of the object is used in the 7910 * freeptr_outside_object() function. If that is no 7911 * longer true, the function needs to be modified. 7912 */ 7913 s->offset = size; 7914 size += sizeof(void *); 7915 } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) { 7916 s->offset = args->freeptr_offset; 7917 } else { 7918 /* 7919 * Store freelist pointer near middle of object to keep 7920 * it away from the edges of the object to avoid small 7921 * sized over/underflows from neighboring allocations. 7922 */ 7923 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 7924 } 7925 7926 #ifdef CONFIG_SLUB_DEBUG 7927 if (flags & SLAB_STORE_USER) { 7928 /* 7929 * Need to store information about allocs and frees after 7930 * the object. 7931 */ 7932 size += 2 * sizeof(struct track); 7933 7934 /* Save the original kmalloc request size */ 7935 if (flags & SLAB_KMALLOC) 7936 size += sizeof(unsigned int); 7937 } 7938 #endif 7939 7940 kasan_cache_create(s, &size, &s->flags); 7941 #ifdef CONFIG_SLUB_DEBUG 7942 if (flags & SLAB_RED_ZONE) { 7943 /* 7944 * Add some empty padding so that we can catch 7945 * overwrites from earlier objects rather than let 7946 * tracking information or the free pointer be 7947 * corrupted if a user writes before the start 7948 * of the object. 7949 */ 7950 size += sizeof(void *); 7951 7952 s->red_left_pad = sizeof(void *); 7953 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 7954 size += s->red_left_pad; 7955 } 7956 #endif 7957 7958 /* 7959 * SLUB stores one object immediately after another beginning from 7960 * offset 0. In order to align the objects we have to simply size 7961 * each object to conform to the alignment. 
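 *
 * E.g. (illustrative): object_size = 52 with s->align = 16 gives
 * size = ALIGN(52, 16) = 64, so objects start at offsets 0, 64, 128, ...
 * and each one is 16-byte aligned.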
7962 */ 7963 size = ALIGN(size, s->align); 7964 s->size = size; 7965 s->reciprocal_size = reciprocal_value(size); 7966 order = calculate_order(size); 7967 7968 if ((int)order < 0) 7969 return 0; 7970 7971 s->allocflags = __GFP_COMP; 7972 7973 if (s->flags & SLAB_CACHE_DMA) 7974 s->allocflags |= GFP_DMA; 7975 7976 if (s->flags & SLAB_CACHE_DMA32) 7977 s->allocflags |= GFP_DMA32; 7978 7979 if (s->flags & SLAB_RECLAIM_ACCOUNT) 7980 s->allocflags |= __GFP_RECLAIMABLE; 7981 7982 /* 7983 * Determine the number of objects per slab 7984 */ 7985 s->oo = oo_make(order, size); 7986 s->min = oo_make(get_order(size), size); 7987 7988 return !!oo_objects(s->oo); 7989 } 7990 7991 static void list_slab_objects(struct kmem_cache *s, struct slab *slab) 7992 { 7993 #ifdef CONFIG_SLUB_DEBUG 7994 void *addr = slab_address(slab); 7995 void *p; 7996 7997 if (!slab_add_kunit_errors()) 7998 slab_bug(s, "Objects remaining on __kmem_cache_shutdown()"); 7999 8000 spin_lock(&object_map_lock); 8001 __fill_map(object_map, s, slab); 8002 8003 for_each_object(p, s, addr, slab->objects) { 8004 8005 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { 8006 if (slab_add_kunit_errors()) 8007 continue; 8008 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 8009 print_tracking(s, p); 8010 } 8011 } 8012 spin_unlock(&object_map_lock); 8013 8014 __slab_err(slab); 8015 #endif 8016 } 8017 8018 /* 8019 * Attempt to free all partial slabs on a node. 8020 * This is called from __kmem_cache_shutdown(). We must take list_lock 8021 * because sysfs files might still access the partial list after shutdown has started. 8022 */ 8023 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 8024 { 8025 LIST_HEAD(discard); 8026 struct slab *slab, *h; 8027 8028 BUG_ON(irqs_disabled()); 8029 spin_lock_irq(&n->list_lock); 8030 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 8031 if (!slab->inuse) { 8032 remove_partial(n, slab); 8033 list_add(&slab->slab_list, &discard); 8034 } else { 8035 list_slab_objects(s, slab); 8036 } 8037 } 8038 spin_unlock_irq(&n->list_lock); 8039 8040 list_for_each_entry_safe(slab, h, &discard, slab_list) 8041 discard_slab(s, slab); 8042 } 8043 8044 bool __kmem_cache_empty(struct kmem_cache *s) 8045 { 8046 int node; 8047 struct kmem_cache_node *n; 8048 8049 for_each_kmem_cache_node(s, node, n) 8050 if (n->nr_partial || node_nr_slabs(n)) 8051 return false; 8052 return true; 8053 } 8054 8055 /* 8056 * Release all resources used by a slab cache.
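 * The teardown order in __kmem_cache_shutdown() below matters: per-cpu
 * state is flushed first, any rcu sheaves still in flight are drained
 * via rcu_barrier(), and only then are the per-node partial lists
 * freed.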
8057 */ 8058 int __kmem_cache_shutdown(struct kmem_cache *s) 8059 { 8060 int node; 8061 struct kmem_cache_node *n; 8062 8063 flush_all_cpus_locked(s); 8064 8065 /* we might have rcu sheaves in flight */ 8066 if (s->cpu_sheaves) 8067 rcu_barrier(); 8068 8069 /* Attempt to free all objects */ 8070 for_each_kmem_cache_node(s, node, n) { 8071 if (n->barn) 8072 barn_shrink(s, n->barn); 8073 free_partial(s, n); 8074 if (n->nr_partial || node_nr_slabs(n)) 8075 return 1; 8076 } 8077 return 0; 8078 } 8079 8080 #ifdef CONFIG_PRINTK 8081 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 8082 { 8083 void *base; 8084 int __maybe_unused i; 8085 unsigned int objnr; 8086 void *objp; 8087 void *objp0; 8088 struct kmem_cache *s = slab->slab_cache; 8089 struct track __maybe_unused *trackp; 8090 8091 kpp->kp_ptr = object; 8092 kpp->kp_slab = slab; 8093 kpp->kp_slab_cache = s; 8094 base = slab_address(slab); 8095 objp0 = kasan_reset_tag(object); 8096 #ifdef CONFIG_SLUB_DEBUG 8097 objp = restore_red_left(s, objp0); 8098 #else 8099 objp = objp0; 8100 #endif 8101 objnr = obj_to_index(s, slab, objp); 8102 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 8103 objp = base + s->size * objnr; 8104 kpp->kp_objp = objp; 8105 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 8106 || (objp - base) % s->size) || 8107 !(s->flags & SLAB_STORE_USER)) 8108 return; 8109 #ifdef CONFIG_SLUB_DEBUG 8110 objp = fixup_red_left(s, objp); 8111 trackp = get_track(s, objp, TRACK_ALLOC); 8112 kpp->kp_ret = (void *)trackp->addr; 8113 #ifdef CONFIG_STACKDEPOT 8114 { 8115 depot_stack_handle_t handle; 8116 unsigned long *entries; 8117 unsigned int nr_entries; 8118 8119 handle = READ_ONCE(trackp->handle); 8120 if (handle) { 8121 nr_entries = stack_depot_fetch(handle, &entries); 8122 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 8123 kpp->kp_stack[i] = (void *)entries[i]; 8124 } 8125 8126 trackp = get_track(s, objp, TRACK_FREE); 8127 handle = READ_ONCE(trackp->handle); 8128 if (handle) { 8129 nr_entries = stack_depot_fetch(handle, &entries); 8130 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 8131 kpp->kp_free_stack[i] = (void *)entries[i]; 8132 } 8133 } 8134 #endif 8135 #endif 8136 } 8137 #endif 8138 8139 /******************************************************************** 8140 * Kmalloc subsystem 8141 *******************************************************************/ 8142 8143 static int __init setup_slub_min_order(const char *str, const struct kernel_param *kp) 8144 { 8145 int ret; 8146 8147 ret = kstrtouint(str, 0, &slub_min_order); 8148 if (ret) 8149 return ret; 8150 8151 if (slub_min_order > slub_max_order) 8152 slub_max_order = slub_min_order; 8153 8154 return 0; 8155 } 8156 8157 static const struct kernel_param_ops param_ops_slab_min_order __initconst = { 8158 .set = setup_slub_min_order, 8159 }; 8160 __core_param_cb(slab_min_order, &param_ops_slab_min_order, &slub_min_order, 0); 8161 __core_param_cb(slub_min_order, &param_ops_slab_min_order, &slub_min_order, 0); 8162 8163 static int __init setup_slub_max_order(const char *str, const struct kernel_param *kp) 8164 { 8165 int ret; 8166 8167 ret = kstrtouint(str, 0, &slub_max_order); 8168 if (ret) 8169 return ret; 8170 8171 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER); 8172 8173 if (slub_min_order > slub_max_order) 8174 slub_min_order = slub_max_order; 8175 8176 return 0; 8177 } 8178 8179 static const struct kernel_param_ops param_ops_slab_max_order __initconst = { 8180 .set =
setup_slub_max_order, 8181 }; 8182 __core_param_cb(slab_max_order, &param_ops_slab_max_order, &slub_max_order, 0); 8183 __core_param_cb(slub_max_order, &param_ops_slab_max_order, &slub_max_order, 0); 8184 8185 core_param(slab_min_objects, slub_min_objects, uint, 0); 8186 core_param(slub_min_objects, slub_min_objects, uint, 0); 8187 8188 #ifdef CONFIG_NUMA 8189 static int __init setup_slab_strict_numa(const char *str, const struct kernel_param *kp) 8190 { 8191 if (nr_node_ids > 1) { 8192 static_branch_enable(&strict_numa); 8193 pr_info("SLUB: Strict NUMA enabled.\n"); 8194 } else { 8195 pr_warn("slab_strict_numa parameter set on non-NUMA system.\n"); 8196 } 8197 8198 return 0; 8199 } 8200 8201 static const struct kernel_param_ops param_ops_slab_strict_numa __initconst = { 8202 .flags = KERNEL_PARAM_OPS_FL_NOARG, 8203 .set = setup_slab_strict_numa, 8204 }; 8205 __core_param_cb(slab_strict_numa, &param_ops_slab_strict_numa, NULL, 0); 8206 #endif 8207 8208 8209 #ifdef CONFIG_HARDENED_USERCOPY 8210 /* 8211 * Rejects incorrectly sized objects and objects that are to be copied 8212 * to/from userspace but do not fall entirely within the containing slab 8213 * cache's usercopy region. 8214 * 8215 * Returns normally if the check passes; otherwise it calls 8216 * usercopy_abort(), reporting the name of the offending cache. 8217 */ 8218 void __check_heap_object(const void *ptr, unsigned long n, 8219 const struct slab *slab, bool to_user) 8220 { 8221 struct kmem_cache *s; 8222 unsigned int offset; 8223 bool is_kfence = is_kfence_address(ptr); 8224 8225 ptr = kasan_reset_tag(ptr); 8226 8227 /* Find object and usable object size. */ 8228 s = slab->slab_cache; 8229 8230 /* Reject impossible pointers. */ 8231 if (ptr < slab_address(slab)) 8232 usercopy_abort("SLUB object not in SLUB page?!", NULL, 8233 to_user, 0, n); 8234 8235 /* Find offset within object. */ 8236 if (is_kfence) 8237 offset = ptr - kfence_object_start(ptr); 8238 else 8239 offset = (ptr - slab_address(slab)) % s->size; 8240 8241 /* Adjust for redzone and reject if within the redzone. */ 8242 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 8243 if (offset < s->red_left_pad) 8244 usercopy_abort("SLUB object in left red zone", 8245 s->name, to_user, offset, n); 8246 offset -= s->red_left_pad; 8247 } 8248 8249 /* Allow address range falling entirely within usercopy region. */ 8250 if (offset >= s->useroffset && 8251 offset - s->useroffset <= s->usersize && 8252 n <= s->useroffset - offset + s->usersize) 8253 return; 8254 8255 usercopy_abort("SLUB object", s->name, to_user, offset, n); 8256 } 8257 #endif /* CONFIG_HARDENED_USERCOPY */ 8258 8259 #define SHRINK_PROMOTE_MAX 32 8260 8261 /* 8262 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 8263 * up most to the head of the partial lists. New allocations will then 8264 * fill those up and thus they can be removed from the partial lists. 8265 * 8266 * The slabs with the fewest objects in use are placed last. This results 8267 * in them being allocated from last, increasing the chance that their 8268 * remaining objects are freed and the slabs can then be discarded.
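 *
 * For example, a partial slab with exactly one free object is moved to
 * promote[0], one with two free objects to promote[1], and so on up to
 * SHRINK_PROMOTE_MAX free objects. Splicing the promote lists back in
 * reverse order then leaves the fullest slabs at the head of the
 * partial list.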
8269 */ 8270 static int __kmem_cache_do_shrink(struct kmem_cache *s) 8271 { 8272 int node; 8273 int i; 8274 struct kmem_cache_node *n; 8275 struct slab *slab; 8276 struct slab *t; 8277 struct list_head discard; 8278 struct list_head promote[SHRINK_PROMOTE_MAX]; 8279 unsigned long flags; 8280 int ret = 0; 8281 8282 for_each_kmem_cache_node(s, node, n) { 8283 INIT_LIST_HEAD(&discard); 8284 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 8285 INIT_LIST_HEAD(promote + i); 8286 8287 if (n->barn) 8288 barn_shrink(s, n->barn); 8289 8290 spin_lock_irqsave(&n->list_lock, flags); 8291 8292 /* 8293 * Build lists of slabs to discard or promote. 8294 * 8295 * Note that concurrent frees may occur while we hold the 8296 * list_lock. slab->inuse here is the upper limit. 8297 */ 8298 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 8299 int free = slab->objects - slab->inuse; 8300 8301 /* Do not reread slab->inuse */ 8302 barrier(); 8303 8304 /* We do not keep full slabs on the list */ 8305 BUG_ON(free <= 0); 8306 8307 if (free == slab->objects) { 8308 list_move(&slab->slab_list, &discard); 8309 slab_clear_node_partial(slab); 8310 n->nr_partial--; 8311 dec_slabs_node(s, node, slab->objects); 8312 } else if (free <= SHRINK_PROMOTE_MAX) 8313 list_move(&slab->slab_list, promote + free - 1); 8314 } 8315 8316 /* 8317 * Promote the slabs filled up most to the head of the 8318 * partial list. 8319 */ 8320 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 8321 list_splice(promote + i, &n->partial); 8322 8323 spin_unlock_irqrestore(&n->list_lock, flags); 8324 8325 /* Release empty slabs */ 8326 list_for_each_entry_safe(slab, t, &discard, slab_list) 8327 free_slab(s, slab); 8328 8329 if (node_nr_slabs(n)) 8330 ret = 1; 8331 } 8332 8333 return ret; 8334 } 8335 8336 int __kmem_cache_shrink(struct kmem_cache *s) 8337 { 8338 flush_all(s); 8339 return __kmem_cache_do_shrink(s); 8340 } 8341 8342 static int slab_mem_going_offline_callback(void) 8343 { 8344 struct kmem_cache *s; 8345 8346 mutex_lock(&slab_mutex); 8347 list_for_each_entry(s, &slab_caches, list) { 8348 flush_all_cpus_locked(s); 8349 __kmem_cache_do_shrink(s); 8350 } 8351 mutex_unlock(&slab_mutex); 8352 8353 return 0; 8354 } 8355 8356 static int slab_mem_going_online_callback(int nid) 8357 { 8358 struct kmem_cache_node *n; 8359 struct kmem_cache *s; 8360 int ret = 0; 8361 8362 /* 8363 * We are bringing a node online. No memory is available yet. We must 8364 * allocate a kmem_cache_node structure in order to bring the node 8365 * online. 8366 */ 8367 mutex_lock(&slab_mutex); 8368 list_for_each_entry(s, &slab_caches, list) { 8369 struct node_barn *barn = NULL; 8370 8371 /* 8372 * The structure may already exist if the node was previously 8373 * onlined and offlined. 8374 */ 8375 if (get_node(s, nid)) 8376 continue; 8377 8378 if (s->cpu_sheaves) { 8379 barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, nid); 8380 8381 if (!barn) { 8382 ret = -ENOMEM; 8383 goto out; 8384 } 8385 } 8386 8387 /* 8388 * XXX: kmem_cache_alloc_node will fallback to other nodes 8389 * since memory is not yet available from the node that 8390 * is brought up. 8391 */ 8392 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 8393 if (!n) { 8394 kfree(barn); 8395 ret = -ENOMEM; 8396 goto out; 8397 } 8398 8399 init_kmem_cache_node(n, barn); 8400 8401 s->node[nid] = n; 8402 } 8403 /* 8404 * Any cache created after this point will also have kmem_cache_node 8405 * initialized for the new node. 
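 * That holds because init_kmem_cache_nodes() iterates the slab_nodes
 * mask, which gains this node via the node_set() below.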
8406 */ 8407 node_set(nid, slab_nodes); 8408 out: 8409 mutex_unlock(&slab_mutex); 8410 return ret; 8411 } 8412 8413 static int slab_memory_callback(struct notifier_block *self, 8414 unsigned long action, void *arg) 8415 { 8416 struct node_notify *nn = arg; 8417 int nid = nn->nid; 8418 int ret = 0; 8419 8420 switch (action) { 8421 case NODE_ADDING_FIRST_MEMORY: 8422 ret = slab_mem_going_online_callback(nid); 8423 break; 8424 case NODE_REMOVING_LAST_MEMORY: 8425 ret = slab_mem_going_offline_callback(); 8426 break; 8427 } 8428 if (ret) 8429 ret = notifier_from_errno(ret); 8430 else 8431 ret = NOTIFY_OK; 8432 return ret; 8433 } 8434 8435 /******************************************************************** 8436 * Basic setup of slabs 8437 *******************************************************************/ 8438 8439 /* 8440 * Used for early kmem_cache structures that were allocated using 8441 * the page allocator. Allocate them properly, then fix up the pointers 8442 * that may be pointing to the wrong kmem_cache structure. 8443 */ 8444 8445 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 8446 { 8447 int node; 8448 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 8449 struct kmem_cache_node *n; 8450 8451 memcpy(s, static_cache, kmem_cache->object_size); 8452 8453 /* 8454 * This runs very early, and only the boot processor is supposed to be 8455 * up. Even if it weren't true, IRQs are not up so we couldn't fire 8456 * IPIs around. 8457 */ 8458 __flush_cpu_slab(s, smp_processor_id()); 8459 for_each_kmem_cache_node(s, node, n) { 8460 struct slab *p; 8461 8462 list_for_each_entry(p, &n->partial, slab_list) 8463 p->slab_cache = s; 8464 8465 #ifdef CONFIG_SLUB_DEBUG 8466 list_for_each_entry(p, &n->full, slab_list) 8467 p->slab_cache = s; 8468 #endif 8469 } 8470 list_add(&s->list, &slab_caches); 8471 return s; 8472 } 8473 8474 void __init kmem_cache_init(void) 8475 { 8476 static __initdata struct kmem_cache boot_kmem_cache, 8477 boot_kmem_cache_node; 8478 int node; 8479 8480 if (debug_guardpage_minorder()) 8481 slub_max_order = 0; 8482 8483 /* Inform pointer hashing choice about slub debugging state. */ 8484 hash_pointers_finalize(__slub_debug_enabled()); 8485 8486 kmem_cache_node = &boot_kmem_cache_node; 8487 kmem_cache = &boot_kmem_cache; 8488 8489 /* 8490 * Initialize the nodemask for which we will allocate per node 8491 * structures. Here we don't need to take slab_mutex yet.
8492 */ 8493 for_each_node_state(node, N_MEMORY) 8494 node_set(node, slab_nodes); 8495 8496 create_boot_cache(kmem_cache_node, "kmem_cache_node", 8497 sizeof(struct kmem_cache_node), 8498 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 8499 8500 hotplug_node_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 8501 8502 /* Able to allocate the per node structures */ 8503 slab_state = PARTIAL; 8504 8505 create_boot_cache(kmem_cache, "kmem_cache", 8506 offsetof(struct kmem_cache, node) + 8507 nr_node_ids * sizeof(struct kmem_cache_node *), 8508 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 8509 8510 kmem_cache = bootstrap(&boot_kmem_cache); 8511 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 8512 8513 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 8514 setup_kmalloc_cache_index_table(); 8515 create_kmalloc_caches(); 8516 8517 /* Setup random freelists for each cache */ 8518 init_freelist_randomization(); 8519 8520 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 8521 slub_cpu_dead); 8522 8523 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 8524 cache_line_size(), 8525 slub_min_order, slub_max_order, slub_min_objects, 8526 nr_cpu_ids, nr_node_ids); 8527 } 8528 8529 void __init kmem_cache_init_late(void) 8530 { 8531 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); 8532 WARN_ON(!flushwq); 8533 } 8534 8535 struct kmem_cache * 8536 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 8537 slab_flags_t flags, void (*ctor)(void *)) 8538 { 8539 struct kmem_cache *s; 8540 8541 s = find_mergeable(size, align, flags, name, ctor); 8542 if (s) { 8543 if (sysfs_slab_alias(s, name)) 8544 pr_err("SLUB: Unable to add cache alias %s to sysfs\n", 8545 name); 8546 8547 s->refcount++; 8548 8549 /* 8550 * Adjust the object sizes so that we clear 8551 * the complete object on kzalloc. 8552 */ 8553 s->object_size = max(s->object_size, size); 8554 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 8555 } 8556 8557 return s; 8558 } 8559 8560 int do_kmem_cache_create(struct kmem_cache *s, const char *name, 8561 unsigned int size, struct kmem_cache_args *args, 8562 slab_flags_t flags) 8563 { 8564 int err = -EINVAL; 8565 8566 s->name = name; 8567 s->size = s->object_size = size; 8568 8569 s->flags = kmem_cache_flags(flags, s->name); 8570 #ifdef CONFIG_SLAB_FREELIST_HARDENED 8571 s->random = get_random_long(); 8572 #endif 8573 s->align = args->align; 8574 s->ctor = args->ctor; 8575 #ifdef CONFIG_HARDENED_USERCOPY 8576 s->useroffset = args->useroffset; 8577 s->usersize = args->usersize; 8578 #endif 8579 8580 if (!calculate_sizes(args, s)) 8581 goto out; 8582 if (disable_higher_order_debug) { 8583 /* 8584 * Disable debugging flags that store metadata if the min slab 8585 * order increased. 8586 */ 8587 if (get_order(s->size) > get_order(s->object_size)) { 8588 s->flags &= ~DEBUG_METADATA_FLAGS; 8589 s->offset = 0; 8590 if (!calculate_sizes(args, s)) 8591 goto out; 8592 } 8593 } 8594 8595 #ifdef system_has_freelist_aba 8596 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { 8597 /* Enable fast mode */ 8598 s->flags |= __CMPXCHG_DOUBLE; 8599 } 8600 #endif 8601 8602 /* 8603 * The larger the object size is, the more slabs we want on the partial 8604 * list to avoid pounding the page allocator excessively. 
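 *
 * For example, s->size == 4096 gives ilog2(4096) / 2 == 6 slabs to
 * keep, before the result is clamped into the [MIN_PARTIAL,
 * MAX_PARTIAL] window below.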
8605 */ 8606 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 8607 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 8608 8609 set_cpu_partial(s); 8610 8611 if (args->sheaf_capacity && !IS_ENABLED(CONFIG_SLUB_TINY) 8612 && !(s->flags & SLAB_DEBUG_FLAGS)) { 8613 s->cpu_sheaves = alloc_percpu(struct slub_percpu_sheaves); 8614 if (!s->cpu_sheaves) { 8615 err = -ENOMEM; 8616 goto out; 8617 } 8618 // TODO: increase capacity to grow slab_sheaf up to next kmalloc size? 8619 s->sheaf_capacity = args->sheaf_capacity; 8620 } 8621 8622 #ifdef CONFIG_NUMA 8623 s->remote_node_defrag_ratio = 1000; 8624 #endif 8625 8626 /* Initialize the pre-computed randomized freelist if slab is up */ 8627 if (slab_state >= UP) { 8628 if (init_cache_random_seq(s)) 8629 goto out; 8630 } 8631 8632 if (!init_kmem_cache_nodes(s)) 8633 goto out; 8634 8635 if (!alloc_kmem_cache_cpus(s)) 8636 goto out; 8637 8638 if (s->cpu_sheaves) { 8639 err = init_percpu_sheaves(s); 8640 if (err) 8641 goto out; 8642 } 8643 8644 err = 0; 8645 8646 /* Mutex is not taken during early boot */ 8647 if (slab_state <= UP) 8648 goto out; 8649 8650 /* 8651 * Failing to create sysfs files is not critical to SLUB functionality. 8652 * If it fails, proceed with cache creation without these files. 8653 */ 8654 if (sysfs_slab_add(s)) 8655 pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name); 8656 8657 if (s->flags & SLAB_STORE_USER) 8658 debugfs_slab_add(s); 8659 8660 out: 8661 if (err) 8662 __kmem_cache_release(s); 8663 return err; 8664 } 8665 8666 #ifdef SLAB_SUPPORTS_SYSFS 8667 static int count_inuse(struct slab *slab) 8668 { 8669 return slab->inuse; 8670 } 8671 8672 static int count_total(struct slab *slab) 8673 { 8674 return slab->objects; 8675 } 8676 #endif 8677 8678 #ifdef CONFIG_SLUB_DEBUG 8679 static void validate_slab(struct kmem_cache *s, struct slab *slab, 8680 unsigned long *obj_map) 8681 { 8682 void *p; 8683 void *addr = slab_address(slab); 8684 8685 if (!validate_slab_ptr(slab)) { 8686 slab_err(s, slab, "Not a valid slab page"); 8687 return; 8688 } 8689 8690 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 8691 return; 8692 8693 /* Now we know that a valid freelist exists */ 8694 __fill_map(obj_map, s, slab); 8695 for_each_object(p, s, addr, slab->objects) { 8696 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 
8697 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 8698 8699 if (!check_object(s, slab, p, val)) 8700 break; 8701 } 8702 } 8703 8704 static int validate_slab_node(struct kmem_cache *s, 8705 struct kmem_cache_node *n, unsigned long *obj_map) 8706 { 8707 unsigned long count = 0; 8708 struct slab *slab; 8709 unsigned long flags; 8710 8711 spin_lock_irqsave(&n->list_lock, flags); 8712 8713 list_for_each_entry(slab, &n->partial, slab_list) { 8714 validate_slab(s, slab, obj_map); 8715 count++; 8716 } 8717 if (count != n->nr_partial) { 8718 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 8719 s->name, count, n->nr_partial); 8720 slab_add_kunit_errors(); 8721 } 8722 8723 if (!(s->flags & SLAB_STORE_USER)) 8724 goto out; 8725 8726 list_for_each_entry(slab, &n->full, slab_list) { 8727 validate_slab(s, slab, obj_map); 8728 count++; 8729 } 8730 if (count != node_nr_slabs(n)) { 8731 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 8732 s->name, count, node_nr_slabs(n)); 8733 slab_add_kunit_errors(); 8734 } 8735 8736 out: 8737 spin_unlock_irqrestore(&n->list_lock, flags); 8738 return count; 8739 } 8740 8741 long validate_slab_cache(struct kmem_cache *s) 8742 { 8743 int node; 8744 unsigned long count = 0; 8745 struct kmem_cache_node *n; 8746 unsigned long *obj_map; 8747 8748 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 8749 if (!obj_map) 8750 return -ENOMEM; 8751 8752 flush_all(s); 8753 for_each_kmem_cache_node(s, node, n) 8754 count += validate_slab_node(s, n, obj_map); 8755 8756 bitmap_free(obj_map); 8757 8758 return count; 8759 } 8760 EXPORT_SYMBOL(validate_slab_cache); 8761 8762 #ifdef CONFIG_DEBUG_FS 8763 /* 8764 * Generate lists of code addresses where slabcache objects are allocated 8765 * and freed. 8766 */ 8767 8768 struct location { 8769 depot_stack_handle_t handle; 8770 unsigned long count; 8771 unsigned long addr; 8772 unsigned long waste; 8773 long long sum_time; 8774 long min_time; 8775 long max_time; 8776 long min_pid; 8777 long max_pid; 8778 DECLARE_BITMAP(cpus, NR_CPUS); 8779 nodemask_t nodes; 8780 }; 8781 8782 struct loc_track { 8783 unsigned long max; 8784 unsigned long count; 8785 struct location *loc; 8786 loff_t idx; 8787 }; 8788 8789 static struct dentry *slab_debugfs_root; 8790 8791 static void free_loc_track(struct loc_track *t) 8792 { 8793 if (t->max) 8794 free_pages((unsigned long)t->loc, 8795 get_order(sizeof(struct location) * t->max)); 8796 } 8797 8798 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 8799 { 8800 struct location *l; 8801 int order; 8802 8803 order = get_order(sizeof(struct location) * max); 8804 8805 l = (void *)__get_free_pages(flags, order); 8806 if (!l) 8807 return 0; 8808 8809 if (t->count) { 8810 memcpy(l, t->loc, sizeof(struct location) * t->count); 8811 free_loc_track(t); 8812 } 8813 t->max = max; 8814 t->loc = l; 8815 return 1; 8816 } 8817 8818 static int add_location(struct loc_track *t, struct kmem_cache *s, 8819 const struct track *track, 8820 unsigned int orig_size) 8821 { 8822 long start, end, pos; 8823 struct location *l; 8824 unsigned long caddr, chandle, cwaste; 8825 unsigned long age = jiffies - track->when; 8826 depot_stack_handle_t handle = 0; 8827 unsigned int waste = s->object_size - orig_size; 8828 8829 #ifdef CONFIG_STACKDEPOT 8830 handle = READ_ONCE(track->handle); 8831 #endif 8832 start = -1; 8833 end = t->count; 8834 8835 for ( ; ; ) { 8836 pos = start + (end - start + 1) / 2; 8837 8838 /* 8839 * There is nothing at "end". 
If we end up there, 8840 * the new element must be inserted before end. 8841 */ 8842 if (pos == end) 8843 break; 8844 8845 l = &t->loc[pos]; 8846 caddr = l->addr; 8847 chandle = l->handle; 8848 cwaste = l->waste; 8849 if ((track->addr == caddr) && (handle == chandle) && 8850 (waste == cwaste)) { 8851 8852 l->count++; 8853 if (track->when) { 8854 l->sum_time += age; 8855 if (age < l->min_time) 8856 l->min_time = age; 8857 if (age > l->max_time) 8858 l->max_time = age; 8859 8860 if (track->pid < l->min_pid) 8861 l->min_pid = track->pid; 8862 if (track->pid > l->max_pid) 8863 l->max_pid = track->pid; 8864 8865 cpumask_set_cpu(track->cpu, 8866 to_cpumask(l->cpus)); 8867 } 8868 node_set(page_to_nid(virt_to_page(track)), l->nodes); 8869 return 1; 8870 } 8871 8872 if (track->addr < caddr) 8873 end = pos; 8874 else if (track->addr == caddr && handle < chandle) 8875 end = pos; 8876 else if (track->addr == caddr && handle == chandle && 8877 waste < cwaste) 8878 end = pos; 8879 else 8880 start = pos; 8881 } 8882 8883 /* 8884 * Not found. Insert new tracking element. 8885 */ 8886 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 8887 return 0; 8888 8889 l = t->loc + pos; 8890 if (pos < t->count) 8891 memmove(l + 1, l, 8892 (t->count - pos) * sizeof(struct location)); 8893 t->count++; 8894 l->count = 1; 8895 l->addr = track->addr; 8896 l->sum_time = age; 8897 l->min_time = age; 8898 l->max_time = age; 8899 l->min_pid = track->pid; 8900 l->max_pid = track->pid; 8901 l->handle = handle; 8902 l->waste = waste; 8903 cpumask_clear(to_cpumask(l->cpus)); 8904 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 8905 nodes_clear(l->nodes); 8906 node_set(page_to_nid(virt_to_page(track)), l->nodes); 8907 return 1; 8908 } 8909 8910 static void process_slab(struct loc_track *t, struct kmem_cache *s, 8911 struct slab *slab, enum track_item alloc, 8912 unsigned long *obj_map) 8913 { 8914 void *addr = slab_address(slab); 8915 bool is_alloc = (alloc == TRACK_ALLOC); 8916 void *p; 8917 8918 __fill_map(obj_map, s, slab); 8919 8920 for_each_object(p, s, addr, slab->objects) 8921 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 8922 add_location(t, s, get_track(s, p, alloc), 8923 is_alloc ?
get_orig_size(s, p) : 8924 s->object_size); 8925 } 8926 #endif /* CONFIG_DEBUG_FS */ 8927 #endif /* CONFIG_SLUB_DEBUG */ 8928 8929 #ifdef SLAB_SUPPORTS_SYSFS 8930 enum slab_stat_type { 8931 SL_ALL, /* All slabs */ 8932 SL_PARTIAL, /* Only partially allocated slabs */ 8933 SL_CPU, /* Only slabs used for cpu caches */ 8934 SL_OBJECTS, /* Determine allocated objects not slabs */ 8935 SL_TOTAL /* Determine object capacity not slabs */ 8936 }; 8937 8938 #define SO_ALL (1 << SL_ALL) 8939 #define SO_PARTIAL (1 << SL_PARTIAL) 8940 #define SO_CPU (1 << SL_CPU) 8941 #define SO_OBJECTS (1 << SL_OBJECTS) 8942 #define SO_TOTAL (1 << SL_TOTAL) 8943 8944 static ssize_t show_slab_objects(struct kmem_cache *s, 8945 char *buf, unsigned long flags) 8946 { 8947 unsigned long total = 0; 8948 int node; 8949 int x; 8950 unsigned long *nodes; 8951 int len = 0; 8952 8953 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 8954 if (!nodes) 8955 return -ENOMEM; 8956 8957 if (flags & SO_CPU) { 8958 int cpu; 8959 8960 for_each_possible_cpu(cpu) { 8961 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 8962 cpu); 8963 int node; 8964 struct slab *slab; 8965 8966 slab = READ_ONCE(c->slab); 8967 if (!slab) 8968 continue; 8969 8970 node = slab_nid(slab); 8971 if (flags & SO_TOTAL) 8972 x = slab->objects; 8973 else if (flags & SO_OBJECTS) 8974 x = slab->inuse; 8975 else 8976 x = 1; 8977 8978 total += x; 8979 nodes[node] += x; 8980 8981 #ifdef CONFIG_SLUB_CPU_PARTIAL 8982 slab = slub_percpu_partial_read_once(c); 8983 if (slab) { 8984 node = slab_nid(slab); 8985 if (flags & SO_TOTAL) 8986 WARN_ON_ONCE(1); 8987 else if (flags & SO_OBJECTS) 8988 WARN_ON_ONCE(1); 8989 else 8990 x = data_race(slab->slabs); 8991 total += x; 8992 nodes[node] += x; 8993 } 8994 #endif 8995 } 8996 } 8997 8998 /* 8999 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 9000 * already held which will conflict with an existing lock order: 9001 * 9002 * mem_hotplug_lock->slab_mutex->kernfs_mutex 9003 * 9004 * We don't really need mem_hotplug_lock (to hold off 9005 * slab_mem_going_offline_callback) here because slab's memory hot 9006 * unplug code doesn't destroy the kmem_cache->node[] data. 
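 *
 * The node iteration below may therefore race with memory hotplug;
 * the worst case is a slightly stale count, which these statistics
 * tolerate anyway.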
9007 */ 9008 9009 #ifdef CONFIG_SLUB_DEBUG 9010 if (flags & SO_ALL) { 9011 struct kmem_cache_node *n; 9012 9013 for_each_kmem_cache_node(s, node, n) { 9014 9015 if (flags & SO_TOTAL) 9016 x = node_nr_objs(n); 9017 else if (flags & SO_OBJECTS) 9018 x = node_nr_objs(n) - count_partial(n, count_free); 9019 else 9020 x = node_nr_slabs(n); 9021 total += x; 9022 nodes[node] += x; 9023 } 9024 9025 } else 9026 #endif 9027 if (flags & SO_PARTIAL) { 9028 struct kmem_cache_node *n; 9029 9030 for_each_kmem_cache_node(s, node, n) { 9031 if (flags & SO_TOTAL) 9032 x = count_partial(n, count_total); 9033 else if (flags & SO_OBJECTS) 9034 x = count_partial(n, count_inuse); 9035 else 9036 x = n->nr_partial; 9037 total += x; 9038 nodes[node] += x; 9039 } 9040 } 9041 9042 len += sysfs_emit_at(buf, len, "%lu", total); 9043 #ifdef CONFIG_NUMA 9044 for (node = 0; node < nr_node_ids; node++) { 9045 if (nodes[node]) 9046 len += sysfs_emit_at(buf, len, " N%d=%lu", 9047 node, nodes[node]); 9048 } 9049 #endif 9050 len += sysfs_emit_at(buf, len, "\n"); 9051 kfree(nodes); 9052 9053 return len; 9054 } 9055 9056 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 9057 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 9058 9059 struct slab_attribute { 9060 struct attribute attr; 9061 ssize_t (*show)(struct kmem_cache *s, char *buf); 9062 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 9063 }; 9064 9065 #define SLAB_ATTR_RO(_name) \ 9066 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 9067 9068 #define SLAB_ATTR(_name) \ 9069 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 9070 9071 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 9072 { 9073 return sysfs_emit(buf, "%u\n", s->size); 9074 } 9075 SLAB_ATTR_RO(slab_size); 9076 9077 static ssize_t align_show(struct kmem_cache *s, char *buf) 9078 { 9079 return sysfs_emit(buf, "%u\n", s->align); 9080 } 9081 SLAB_ATTR_RO(align); 9082 9083 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 9084 { 9085 return sysfs_emit(buf, "%u\n", s->object_size); 9086 } 9087 SLAB_ATTR_RO(object_size); 9088 9089 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 9090 { 9091 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 9092 } 9093 SLAB_ATTR_RO(objs_per_slab); 9094 9095 static ssize_t order_show(struct kmem_cache *s, char *buf) 9096 { 9097 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 9098 } 9099 SLAB_ATTR_RO(order); 9100 9101 static ssize_t sheaf_capacity_show(struct kmem_cache *s, char *buf) 9102 { 9103 return sysfs_emit(buf, "%u\n", s->sheaf_capacity); 9104 } 9105 SLAB_ATTR_RO(sheaf_capacity); 9106 9107 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 9108 { 9109 return sysfs_emit(buf, "%lu\n", s->min_partial); 9110 } 9111 9112 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 9113 size_t length) 9114 { 9115 unsigned long min; 9116 int err; 9117 9118 err = kstrtoul(buf, 10, &min); 9119 if (err) 9120 return err; 9121 9122 s->min_partial = min; 9123 return length; 9124 } 9125 SLAB_ATTR(min_partial); 9126 9127 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 9128 { 9129 unsigned int nr_partial = 0; 9130 #ifdef CONFIG_SLUB_CPU_PARTIAL 9131 nr_partial = s->cpu_partial; 9132 #endif 9133 9134 return sysfs_emit(buf, "%u\n", nr_partial); 9135 } 9136 9137 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 9138 size_t length) 9139 { 9140 unsigned int objects; 9141 int err; 9142 9143 
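	/*
	 * Writing e.g. "0" to /sys/kernel/slab/<cache>/cpu_partial
	 * disables per cpu partial caching for this cache; nonzero
	 * values are rejected below unless the cache supports cpu
	 * partial slabs.
	 */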
err = kstrtouint(buf, 10, &objects); 9144 if (err) 9145 return err; 9146 if (objects && !kmem_cache_has_cpu_partial(s)) 9147 return -EINVAL; 9148 9149 slub_set_cpu_partial(s, objects); 9150 flush_all(s); 9151 return length; 9152 } 9153 SLAB_ATTR(cpu_partial); 9154 9155 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 9156 { 9157 if (!s->ctor) 9158 return 0; 9159 return sysfs_emit(buf, "%pS\n", s->ctor); 9160 } 9161 SLAB_ATTR_RO(ctor); 9162 9163 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 9164 { 9165 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 9166 } 9167 SLAB_ATTR_RO(aliases); 9168 9169 static ssize_t partial_show(struct kmem_cache *s, char *buf) 9170 { 9171 return show_slab_objects(s, buf, SO_PARTIAL); 9172 } 9173 SLAB_ATTR_RO(partial); 9174 9175 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 9176 { 9177 return show_slab_objects(s, buf, SO_CPU); 9178 } 9179 SLAB_ATTR_RO(cpu_slabs); 9180 9181 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 9182 { 9183 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 9184 } 9185 SLAB_ATTR_RO(objects_partial); 9186 9187 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 9188 { 9189 int objects = 0; 9190 int slabs = 0; 9191 int cpu __maybe_unused; 9192 int len = 0; 9193 9194 #ifdef CONFIG_SLUB_CPU_PARTIAL 9195 for_each_online_cpu(cpu) { 9196 struct slab *slab; 9197 9198 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 9199 9200 if (slab) 9201 slabs += data_race(slab->slabs); 9202 } 9203 #endif 9204 9205 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 9206 objects = (slabs * oo_objects(s->oo)) / 2; 9207 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 9208 9209 #ifdef CONFIG_SLUB_CPU_PARTIAL 9210 for_each_online_cpu(cpu) { 9211 struct slab *slab; 9212 9213 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 9214 if (slab) { 9215 slabs = data_race(slab->slabs); 9216 objects = (slabs * oo_objects(s->oo)) / 2; 9217 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 9218 cpu, objects, slabs); 9219 } 9220 } 9221 #endif 9222 len += sysfs_emit_at(buf, len, "\n"); 9223 9224 return len; 9225 } 9226 SLAB_ATTR_RO(slabs_cpu_partial); 9227 9228 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 9229 { 9230 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 9231 } 9232 SLAB_ATTR_RO(reclaim_account); 9233 9234 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 9235 { 9236 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 9237 } 9238 SLAB_ATTR_RO(hwcache_align); 9239 9240 #ifdef CONFIG_ZONE_DMA 9241 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 9242 { 9243 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 9244 } 9245 SLAB_ATTR_RO(cache_dma); 9246 #endif 9247 9248 #ifdef CONFIG_HARDENED_USERCOPY 9249 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 9250 { 9251 return sysfs_emit(buf, "%u\n", s->usersize); 9252 } 9253 SLAB_ATTR_RO(usersize); 9254 #endif 9255 9256 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 9257 { 9258 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 9259 } 9260 SLAB_ATTR_RO(destroy_by_rcu); 9261 9262 #ifdef CONFIG_SLUB_DEBUG 9263 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 9264 { 9265 return show_slab_objects(s, buf, SO_ALL); 9266 } 9267 SLAB_ATTR_RO(slabs); 9268 9269 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 9270 { 
9271 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 9272 } 9273 SLAB_ATTR_RO(total_objects); 9274 9275 static ssize_t objects_show(struct kmem_cache *s, char *buf) 9276 { 9277 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 9278 } 9279 SLAB_ATTR_RO(objects); 9280 9281 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 9282 { 9283 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 9284 } 9285 SLAB_ATTR_RO(sanity_checks); 9286 9287 static ssize_t trace_show(struct kmem_cache *s, char *buf) 9288 { 9289 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 9290 } 9291 SLAB_ATTR_RO(trace); 9292 9293 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 9294 { 9295 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 9296 } 9297 9298 SLAB_ATTR_RO(red_zone); 9299 9300 static ssize_t poison_show(struct kmem_cache *s, char *buf) 9301 { 9302 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 9303 } 9304 9305 SLAB_ATTR_RO(poison); 9306 9307 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 9308 { 9309 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 9310 } 9311 9312 SLAB_ATTR_RO(store_user); 9313 9314 static ssize_t validate_show(struct kmem_cache *s, char *buf) 9315 { 9316 return 0; 9317 } 9318 9319 static ssize_t validate_store(struct kmem_cache *s, 9320 const char *buf, size_t length) 9321 { 9322 int ret = -EINVAL; 9323 9324 if (buf[0] == '1' && kmem_cache_debug(s)) { 9325 ret = validate_slab_cache(s); 9326 if (ret >= 0) 9327 ret = length; 9328 } 9329 return ret; 9330 } 9331 SLAB_ATTR(validate); 9332 9333 #endif /* CONFIG_SLUB_DEBUG */ 9334 9335 #ifdef CONFIG_FAILSLAB 9336 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 9337 { 9338 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 9339 } 9340 9341 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 9342 size_t length) 9343 { 9344 if (s->refcount > 1) 9345 return -EINVAL; 9346 9347 if (buf[0] == '1') 9348 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); 9349 else 9350 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); 9351 9352 return length; 9353 } 9354 SLAB_ATTR(failslab); 9355 #endif 9356 9357 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 9358 { 9359 return 0; 9360 } 9361 9362 static ssize_t shrink_store(struct kmem_cache *s, 9363 const char *buf, size_t length) 9364 { 9365 if (buf[0] == '1') 9366 kmem_cache_shrink(s); 9367 else 9368 return -EINVAL; 9369 return length; 9370 } 9371 SLAB_ATTR(shrink); 9372 9373 #ifdef CONFIG_NUMA 9374 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 9375 { 9376 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 9377 } 9378 9379 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 9380 const char *buf, size_t length) 9381 { 9382 unsigned int ratio; 9383 int err; 9384 9385 err = kstrtouint(buf, 10, &ratio); 9386 if (err) 9387 return err; 9388 if (ratio > 100) 9389 return -ERANGE; 9390 9391 s->remote_node_defrag_ratio = ratio * 10; 9392 9393 return length; 9394 } 9395 SLAB_ATTR(remote_node_defrag_ratio); 9396 #endif 9397 9398 #ifdef CONFIG_SLUB_STATS 9399 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 9400 { 9401 unsigned long sum = 0; 9402 int cpu; 9403 int len = 0; 9404 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 9405 9406 if (!data) 9407 return -ENOMEM; 9408 9409 for_each_online_cpu(cpu) { 9410 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 9411 
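		/*
		 * Snapshot each cpu's counter so that the per-cpu
		 * breakdown printed below matches the sum emitted first.
		 */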
9412 data[cpu] = x; 9413 sum += x; 9414 } 9415 9416 len += sysfs_emit_at(buf, len, "%lu", sum); 9417 9418 #ifdef CONFIG_SMP 9419 for_each_online_cpu(cpu) { 9420 if (data[cpu]) 9421 len += sysfs_emit_at(buf, len, " C%d=%u", 9422 cpu, data[cpu]); 9423 } 9424 #endif 9425 kfree(data); 9426 len += sysfs_emit_at(buf, len, "\n"); 9427 9428 return len; 9429 } 9430 9431 static void clear_stat(struct kmem_cache *s, enum stat_item si) 9432 { 9433 int cpu; 9434 9435 for_each_online_cpu(cpu) 9436 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 9437 } 9438 9439 #define STAT_ATTR(si, text) \ 9440 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 9441 { \ 9442 return show_stat(s, buf, si); \ 9443 } \ 9444 static ssize_t text##_store(struct kmem_cache *s, \ 9445 const char *buf, size_t length) \ 9446 { \ 9447 if (buf[0] != '0') \ 9448 return -EINVAL; \ 9449 clear_stat(s, si); \ 9450 return length; \ 9451 } \ 9452 SLAB_ATTR(text); \ 9453 9454 STAT_ATTR(ALLOC_PCS, alloc_cpu_sheaf); 9455 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 9456 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 9457 STAT_ATTR(FREE_PCS, free_cpu_sheaf); 9458 STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf); 9459 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail); 9460 STAT_ATTR(FREE_FASTPATH, free_fastpath); 9461 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 9462 STAT_ATTR(FREE_FROZEN, free_frozen); 9463 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 9464 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 9465 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 9466 STAT_ATTR(ALLOC_SLAB, alloc_slab); 9467 STAT_ATTR(ALLOC_REFILL, alloc_refill); 9468 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 9469 STAT_ATTR(FREE_SLAB, free_slab); 9470 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 9471 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 9472 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 9473 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 9474 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 9475 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 9476 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 9477 STAT_ATTR(ORDER_FALLBACK, order_fallback); 9478 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 9479 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 9480 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 9481 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 9482 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 9483 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 9484 STAT_ATTR(SHEAF_FLUSH, sheaf_flush); 9485 STAT_ATTR(SHEAF_REFILL, sheaf_refill); 9486 STAT_ATTR(SHEAF_ALLOC, sheaf_alloc); 9487 STAT_ATTR(SHEAF_FREE, sheaf_free); 9488 STAT_ATTR(BARN_GET, barn_get); 9489 STAT_ATTR(BARN_GET_FAIL, barn_get_fail); 9490 STAT_ATTR(BARN_PUT, barn_put); 9491 STAT_ATTR(BARN_PUT_FAIL, barn_put_fail); 9492 STAT_ATTR(SHEAF_PREFILL_FAST, sheaf_prefill_fast); 9493 STAT_ATTR(SHEAF_PREFILL_SLOW, sheaf_prefill_slow); 9494 STAT_ATTR(SHEAF_PREFILL_OVERSIZE, sheaf_prefill_oversize); 9495 STAT_ATTR(SHEAF_RETURN_FAST, sheaf_return_fast); 9496 STAT_ATTR(SHEAF_RETURN_SLOW, sheaf_return_slow); 9497 #endif /* CONFIG_SLUB_STATS */ 9498 9499 #ifdef CONFIG_KFENCE 9500 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) 9501 { 9502 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); 9503 } 9504 9505 static ssize_t skip_kfence_store(struct kmem_cache *s, 9506 const char *buf, size_t length) 9507 { 9508 int ret = length; 9509 9510 if (buf[0] == '0') 9511 s->flags &= ~SLAB_SKIP_KFENCE; 9512 else if (buf[0] == '1') 9513 s->flags |= 
SLAB_SKIP_KFENCE; 9514 else 9515 ret = -EINVAL; 9516 9517 return ret; 9518 } 9519 SLAB_ATTR(skip_kfence); 9520 #endif 9521 9522 static struct attribute *slab_attrs[] = { 9523 &slab_size_attr.attr, 9524 &object_size_attr.attr, 9525 &objs_per_slab_attr.attr, 9526 &order_attr.attr, 9527 &sheaf_capacity_attr.attr, 9528 &min_partial_attr.attr, 9529 &cpu_partial_attr.attr, 9530 &objects_partial_attr.attr, 9531 &partial_attr.attr, 9532 &cpu_slabs_attr.attr, 9533 &ctor_attr.attr, 9534 &aliases_attr.attr, 9535 &align_attr.attr, 9536 &hwcache_align_attr.attr, 9537 &reclaim_account_attr.attr, 9538 &destroy_by_rcu_attr.attr, 9539 &shrink_attr.attr, 9540 &slabs_cpu_partial_attr.attr, 9541 #ifdef CONFIG_SLUB_DEBUG 9542 &total_objects_attr.attr, 9543 &objects_attr.attr, 9544 &slabs_attr.attr, 9545 &sanity_checks_attr.attr, 9546 &trace_attr.attr, 9547 &red_zone_attr.attr, 9548 &poison_attr.attr, 9549 &store_user_attr.attr, 9550 &validate_attr.attr, 9551 #endif 9552 #ifdef CONFIG_ZONE_DMA 9553 &cache_dma_attr.attr, 9554 #endif 9555 #ifdef CONFIG_NUMA 9556 &remote_node_defrag_ratio_attr.attr, 9557 #endif 9558 #ifdef CONFIG_SLUB_STATS 9559 &alloc_cpu_sheaf_attr.attr, 9560 &alloc_fastpath_attr.attr, 9561 &alloc_slowpath_attr.attr, 9562 &free_cpu_sheaf_attr.attr, 9563 &free_rcu_sheaf_attr.attr, 9564 &free_rcu_sheaf_fail_attr.attr, 9565 &free_fastpath_attr.attr, 9566 &free_slowpath_attr.attr, 9567 &free_frozen_attr.attr, 9568 &free_add_partial_attr.attr, 9569 &free_remove_partial_attr.attr, 9570 &alloc_from_partial_attr.attr, 9571 &alloc_slab_attr.attr, 9572 &alloc_refill_attr.attr, 9573 &alloc_node_mismatch_attr.attr, 9574 &free_slab_attr.attr, 9575 &cpuslab_flush_attr.attr, 9576 &deactivate_full_attr.attr, 9577 &deactivate_empty_attr.attr, 9578 &deactivate_to_head_attr.attr, 9579 &deactivate_to_tail_attr.attr, 9580 &deactivate_remote_frees_attr.attr, 9581 &deactivate_bypass_attr.attr, 9582 &order_fallback_attr.attr, 9583 &cmpxchg_double_fail_attr.attr, 9584 &cmpxchg_double_cpu_fail_attr.attr, 9585 &cpu_partial_alloc_attr.attr, 9586 &cpu_partial_free_attr.attr, 9587 &cpu_partial_node_attr.attr, 9588 &cpu_partial_drain_attr.attr, 9589 &sheaf_flush_attr.attr, 9590 &sheaf_refill_attr.attr, 9591 &sheaf_alloc_attr.attr, 9592 &sheaf_free_attr.attr, 9593 &barn_get_attr.attr, 9594 &barn_get_fail_attr.attr, 9595 &barn_put_attr.attr, 9596 &barn_put_fail_attr.attr, 9597 &sheaf_prefill_fast_attr.attr, 9598 &sheaf_prefill_slow_attr.attr, 9599 &sheaf_prefill_oversize_attr.attr, 9600 &sheaf_return_fast_attr.attr, 9601 &sheaf_return_slow_attr.attr, 9602 #endif 9603 #ifdef CONFIG_FAILSLAB 9604 &failslab_attr.attr, 9605 #endif 9606 #ifdef CONFIG_HARDENED_USERCOPY 9607 &usersize_attr.attr, 9608 #endif 9609 #ifdef CONFIG_KFENCE 9610 &skip_kfence_attr.attr, 9611 #endif 9612 9613 NULL 9614 }; 9615 9616 static const struct attribute_group slab_attr_group = { 9617 .attrs = slab_attrs, 9618 }; 9619 9620 static ssize_t slab_attr_show(struct kobject *kobj, 9621 struct attribute *attr, 9622 char *buf) 9623 { 9624 struct slab_attribute *attribute; 9625 struct kmem_cache *s; 9626 9627 attribute = to_slab_attr(attr); 9628 s = to_slab(kobj); 9629 9630 if (!attribute->show) 9631 return -EIO; 9632 9633 return attribute->show(s, buf); 9634 } 9635 9636 static ssize_t slab_attr_store(struct kobject *kobj, 9637 struct attribute *attr, 9638 const char *buf, size_t len) 9639 { 9640 struct slab_attribute *attribute; 9641 struct kmem_cache *s; 9642 9643 attribute = to_slab_attr(attr); 9644 s = to_slab(kobj); 9645 9646 if (!attribute->store) 9647 
return -EIO; 9648 9649 return attribute->store(s, buf, len); 9650 } 9651 9652 static void kmem_cache_release(struct kobject *k) 9653 { 9654 slab_kmem_cache_release(to_slab(k)); 9655 } 9656 9657 static const struct sysfs_ops slab_sysfs_ops = { 9658 .show = slab_attr_show, 9659 .store = slab_attr_store, 9660 }; 9661 9662 static const struct kobj_type slab_ktype = { 9663 .sysfs_ops = &slab_sysfs_ops, 9664 .release = kmem_cache_release, 9665 }; 9666 9667 static struct kset *slab_kset; 9668 9669 static inline struct kset *cache_kset(struct kmem_cache *s) 9670 { 9671 return slab_kset; 9672 } 9673 9674 #define ID_STR_LENGTH 32 9675 9676 /* Create a unique string id for a slab cache: 9677 * 9678 * Format :[flags-]size 9679 */ 9680 static char *create_unique_id(struct kmem_cache *s) 9681 { 9682 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 9683 char *p = name; 9684 9685 if (!name) 9686 return ERR_PTR(-ENOMEM); 9687 9688 *p++ = ':'; 9689 /* 9690 * First flags affecting slabcache operations. We will only 9691 * get here for aliasable slabs so we do not need to support 9692 * too many flags. The flags here must cover all flags that 9693 * are matched during merging to guarantee that the id is 9694 * unique. 9695 */ 9696 if (s->flags & SLAB_CACHE_DMA) 9697 *p++ = 'd'; 9698 if (s->flags & SLAB_CACHE_DMA32) 9699 *p++ = 'D'; 9700 if (s->flags & SLAB_RECLAIM_ACCOUNT) 9701 *p++ = 'a'; 9702 if (s->flags & SLAB_CONSISTENCY_CHECKS) 9703 *p++ = 'F'; 9704 if (s->flags & SLAB_ACCOUNT) 9705 *p++ = 'A'; 9706 if (p != name + 1) 9707 *p++ = '-'; 9708 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); 9709 9710 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { 9711 kfree(name); 9712 return ERR_PTR(-EINVAL); 9713 } 9714 kmsan_unpoison_memory(name, p - name); 9715 return name; 9716 } 9717 9718 static int sysfs_slab_add(struct kmem_cache *s) 9719 { 9720 int err; 9721 const char *name; 9722 struct kset *kset = cache_kset(s); 9723 int unmergeable = slab_unmergeable(s); 9724 9725 if (!unmergeable && disable_higher_order_debug && 9726 (slub_debug & DEBUG_METADATA_FLAGS)) 9727 unmergeable = 1; 9728 9729 if (unmergeable) { 9730 /* 9731 * Slabcache can never be merged so we can use the name proper. 9732 * This is typically the case for debug situations. In that 9733 * case we can catch duplicate names easily. 9734 */ 9735 sysfs_remove_link(&slab_kset->kobj, s->name); 9736 name = s->name; 9737 } else { 9738 /* 9739 * Create a unique name for the slab as a target 9740 * for the symlinks. 9741 */ 9742 name = create_unique_id(s); 9743 if (IS_ERR(name)) 9744 return PTR_ERR(name); 9745 } 9746 9747 s->kobj.kset = kset; 9748 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 9749 if (err) 9750 goto out; 9751 9752 err = sysfs_create_group(&s->kobj, &slab_attr_group); 9753 if (err) 9754 goto out_del_kobj; 9755 9756 if (!unmergeable) { 9757 /* Setup first alias */ 9758 sysfs_slab_alias(s, s->name); 9759 } 9760 out: 9761 if (!unmergeable) 9762 kfree(name); 9763 return err; 9764 out_del_kobj: 9765 kobject_del(&s->kobj); 9766 goto out; 9767 } 9768 9769 void sysfs_slab_unlink(struct kmem_cache *s) 9770 { 9771 if (s->kobj.state_in_sysfs) 9772 kobject_del(&s->kobj); 9773 } 9774 9775 void sysfs_slab_release(struct kmem_cache *s) 9776 { 9777 kobject_put(&s->kobj); 9778 } 9779 9780 /* 9781 * Need to buffer aliases during bootup until sysfs becomes 9782 * available lest we lose that information. 
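 * Aliases requested before slab_state reaches FULL are queued on
 * alias_list here and replayed by slab_sysfs_init().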
9783 */ 9784 struct saved_alias { 9785 struct kmem_cache *s; 9786 const char *name; 9787 struct saved_alias *next; 9788 }; 9789 9790 static struct saved_alias *alias_list; 9791 9792 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 9793 { 9794 struct saved_alias *al; 9795 9796 if (slab_state == FULL) { 9797 /* 9798 * If we have a leftover link then remove it. 9799 */ 9800 sysfs_remove_link(&slab_kset->kobj, name); 9801 /* 9802 * The original cache may have failed to generate sysfs file. 9803 * In that case, sysfs_create_link() returns -ENOENT and 9804 * symbolic link creation is skipped. 9805 */ 9806 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 9807 } 9808 9809 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 9810 if (!al) 9811 return -ENOMEM; 9812 9813 al->s = s; 9814 al->name = name; 9815 al->next = alias_list; 9816 alias_list = al; 9817 kmsan_unpoison_memory(al, sizeof(*al)); 9818 return 0; 9819 } 9820 9821 static int __init slab_sysfs_init(void) 9822 { 9823 struct kmem_cache *s; 9824 int err; 9825 9826 mutex_lock(&slab_mutex); 9827 9828 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 9829 if (!slab_kset) { 9830 mutex_unlock(&slab_mutex); 9831 pr_err("Cannot register slab subsystem.\n"); 9832 return -ENOMEM; 9833 } 9834 9835 slab_state = FULL; 9836 9837 list_for_each_entry(s, &slab_caches, list) { 9838 err = sysfs_slab_add(s); 9839 if (err) 9840 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 9841 s->name); 9842 } 9843 9844 while (alias_list) { 9845 struct saved_alias *al = alias_list; 9846 9847 alias_list = alias_list->next; 9848 err = sysfs_slab_alias(al->s, al->name); 9849 if (err) 9850 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 9851 al->name); 9852 kfree(al); 9853 } 9854 9855 mutex_unlock(&slab_mutex); 9856 return 0; 9857 } 9858 late_initcall(slab_sysfs_init); 9859 #endif /* SLAB_SUPPORTS_SYSFS */ 9860 9861 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 9862 static int slab_debugfs_show(struct seq_file *seq, void *v) 9863 { 9864 struct loc_track *t = seq->private; 9865 struct location *l; 9866 unsigned long idx; 9867 9868 idx = (unsigned long) t->idx; 9869 if (idx < t->count) { 9870 l = &t->loc[idx]; 9871 9872 seq_printf(seq, "%7ld ", l->count); 9873 9874 if (l->addr) 9875 seq_printf(seq, "%pS", (void *)l->addr); 9876 else 9877 seq_puts(seq, "<not-available>"); 9878 9879 if (l->waste) 9880 seq_printf(seq, " waste=%lu/%lu", 9881 l->count * l->waste, l->waste); 9882 9883 if (l->sum_time != l->min_time) { 9884 seq_printf(seq, " age=%ld/%llu/%ld", 9885 l->min_time, div_u64(l->sum_time, l->count), 9886 l->max_time); 9887 } else 9888 seq_printf(seq, " age=%ld", l->min_time); 9889 9890 if (l->min_pid != l->max_pid) 9891 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 9892 else 9893 seq_printf(seq, " pid=%ld", 9894 l->min_pid); 9895 9896 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 9897 seq_printf(seq, " cpus=%*pbl", 9898 cpumask_pr_args(to_cpumask(l->cpus))); 9899 9900 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 9901 seq_printf(seq, " nodes=%*pbl", 9902 nodemask_pr_args(&l->nodes)); 9903 9904 #ifdef CONFIG_STACKDEPOT 9905 { 9906 depot_stack_handle_t handle; 9907 unsigned long *entries; 9908 unsigned int nr_entries, j; 9909 9910 handle = READ_ONCE(l->handle); 9911 if (handle) { 9912 nr_entries = stack_depot_fetch(handle, &entries); 9913 seq_puts(seq, "\n"); 9914 for (j = 0; j < nr_entries; j++) 9915 seq_printf(seq, " %pS\n", (void *)entries[j]); 9916 } 9917 } 9918 #endif 
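	/*
	 * Each location line thus reads: count, call site, then the
	 * optional waste=, age=, pid=, cpus= and nodes= fields, followed
	 * by the stack trace when stack depot data is available.
	 */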
9919 seq_puts(seq, "\n"); 9920 } 9921 9922 if (!idx && !t->count) 9923 seq_puts(seq, "No data\n"); 9924 9925 return 0; 9926 } 9927 9928 static void slab_debugfs_stop(struct seq_file *seq, void *v) 9929 { 9930 } 9931 9932 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 9933 { 9934 struct loc_track *t = seq->private; 9935 9936 t->idx = ++(*ppos); 9937 if (*ppos <= t->count) 9938 return ppos; 9939 9940 return NULL; 9941 } 9942 9943 static int cmp_loc_by_count(const void *a, const void *b) 9944 { 9945 struct location *loc1 = (struct location *)a; 9946 struct location *loc2 = (struct location *)b; 9947 9948 return cmp_int(loc2->count, loc1->count); 9949 } 9950 9951 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 9952 { 9953 struct loc_track *t = seq->private; 9954 9955 t->idx = *ppos; 9956 return ppos; 9957 } 9958 9959 static const struct seq_operations slab_debugfs_sops = { 9960 .start = slab_debugfs_start, 9961 .next = slab_debugfs_next, 9962 .stop = slab_debugfs_stop, 9963 .show = slab_debugfs_show, 9964 }; 9965 9966 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 9967 { 9968 9969 struct kmem_cache_node *n; 9970 enum track_item alloc; 9971 int node; 9972 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 9973 sizeof(struct loc_track)); 9974 struct kmem_cache *s = file_inode(filep)->i_private; 9975 unsigned long *obj_map; 9976 9977 if (!t) 9978 return -ENOMEM; 9979 9980 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 9981 if (!obj_map) { 9982 seq_release_private(inode, filep); 9983 return -ENOMEM; 9984 } 9985 9986 alloc = debugfs_get_aux_num(filep); 9987 9988 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 9989 bitmap_free(obj_map); 9990 seq_release_private(inode, filep); 9991 return -ENOMEM; 9992 } 9993 9994 for_each_kmem_cache_node(s, node, n) { 9995 unsigned long flags; 9996 struct slab *slab; 9997 9998 if (!node_nr_slabs(n)) 9999 continue; 10000 10001 spin_lock_irqsave(&n->list_lock, flags); 10002 list_for_each_entry(slab, &n->partial, slab_list) 10003 process_slab(t, s, slab, alloc, obj_map); 10004 list_for_each_entry(slab, &n->full, slab_list) 10005 process_slab(t, s, slab, alloc, obj_map); 10006 spin_unlock_irqrestore(&n->list_lock, flags); 10007 } 10008 10009 /* Sort locations by count */ 10010 sort(t->loc, t->count, sizeof(struct location), 10011 cmp_loc_by_count, NULL); 10012 10013 bitmap_free(obj_map); 10014 return 0; 10015 } 10016 10017 static int slab_debug_trace_release(struct inode *inode, struct file *file) 10018 { 10019 struct seq_file *seq = file->private_data; 10020 struct loc_track *t = seq->private; 10021 10022 free_loc_track(t); 10023 return seq_release_private(inode, file); 10024 } 10025 10026 static const struct file_operations slab_debugfs_fops = { 10027 .open = slab_debug_trace_open, 10028 .read = seq_read, 10029 .llseek = seq_lseek, 10030 .release = slab_debug_trace_release, 10031 }; 10032 10033 static void debugfs_slab_add(struct kmem_cache *s) 10034 { 10035 struct dentry *slab_cache_dir; 10036 10037 if (unlikely(!slab_debugfs_root)) 10038 return; 10039 10040 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 10041 10042 debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s, 10043 TRACK_ALLOC, &slab_debugfs_fops); 10044 10045 debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s, 10046 TRACK_FREE, &slab_debugfs_fops); 10047 } 10048 10049 void debugfs_slab_release(struct kmem_cache *s) 10050 { 10051 
debugfs_lookup_and_remove(s->name, slab_debugfs_root); 10052 } 10053 10054 static int __init slab_debugfs_init(void) 10055 { 10056 struct kmem_cache *s; 10057 10058 slab_debugfs_root = debugfs_create_dir("slab", NULL); 10059 10060 list_for_each_entry(s, &slab_caches, list) 10061 if (s->flags & SLAB_STORE_USER) 10062 debugfs_slab_add(s); 10063 10064 return 0; 10065 10066 } 10067 __initcall(slab_debugfs_init); 10068 #endif 10069 /* 10070 * The /proc/slabinfo ABI 10071 */ 10072 #ifdef CONFIG_SLUB_DEBUG 10073 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 10074 { 10075 unsigned long nr_slabs = 0; 10076 unsigned long nr_objs = 0; 10077 unsigned long nr_free = 0; 10078 int node; 10079 struct kmem_cache_node *n; 10080 10081 for_each_kmem_cache_node(s, node, n) { 10082 nr_slabs += node_nr_slabs(n); 10083 nr_objs += node_nr_objs(n); 10084 nr_free += count_partial_free_approx(n); 10085 } 10086 10087 sinfo->active_objs = nr_objs - nr_free; 10088 sinfo->num_objs = nr_objs; 10089 sinfo->active_slabs = nr_slabs; 10090 sinfo->num_slabs = nr_slabs; 10091 sinfo->objects_per_slab = oo_objects(s->oo); 10092 sinfo->cache_order = oo_order(s->oo); 10093 } 10094 #endif /* CONFIG_SLUB_DEBUG */ 10095