// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* mm_account_reclaimed_pages() */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/kmemleak.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/sort.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
 *   4. slab_lock(slab) (Only on some arches)
 *   5. object_map_lock (Only for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *   Also synchronizes memory hotplug callbacks.
 *
 *   slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
 *
 *   The slab_lock is only used on arches that do not have the ability
 *   to do a cmpxchg_double. It only protects:
 *
 *	A. slab->freelist	-> List of free objects in a slab
 *	B. slab->inuse		-> Number of objects in use
 *	C. slab->objects	-> Number of objects in slab
 *	D. slab->frozen		-> frozen state
 *
 *   Frozen slabs
 *
 *   If a slab is frozen then it is exempt from list management. It is
 *   the cpu slab which is actively allocated from by the processor that
 *   froze it and it is not on any list. The processor that froze the
 *   slab is the one who can perform list operations on the slab. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   slab's freelist.
 *
 *   CPU partial slabs
 *
 *   The partially empty slabs cached on the CPU partial list are used
 *   for performance reasons, which speeds up the allocation process.
 *   These slabs are not frozen, but are also exempt from list management,
 *   by clearing the PG_workingset flag when moving out of the node
 *   partial list. Please see __slab_free() for more details.
 *
 * To sum up, the current scheme is:
 * - node partial slab: PG_Workingset && !frozen
 * - cpu partial slab:  !PG_Workingset && !frozen
 * - cpu slab:          !PG_Workingset && frozen
 * - full slab:         !PG_Workingset && !frozen
 *
 *   list_lock
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. While it is held, no slabs may be added to
 *   or removed from the lists, nor may the number of partial slabs change.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   For debug caches, all allocations are forced to go through a list_lock
 *   protected region to serialize against concurrent validation.
 *
 *   cpu_slab->lock local lock
 *
 *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters. This is a percpu structure manipulated only by
 *   the local cpu, so the lock protects against being preempted or interrupted
 *   by an irq. Fast path operations rely on lockless operations instead.
 *
 *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption,
 *   which means the lockless fastpath cannot be used as it might interfere
 *   with in-progress slow path operations. In this case the local lock is
 *   always taken but it still utilizes the freelist for the common operations.
 *
 *   lockless fastpaths
 *
 *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 *   are fully lockless when satisfied from the percpu slab (and when
 *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
 *   They also don't disable preemption or migration or irqs. They rely on
 *   the transaction id (tid) field to detect being preempted or moved to
 *   another cpu.
 *
 *   irq, preemption, migration considerations
 *
 *   Interrupts are disabled as part of list_lock or local_lock operations, or
 *   around the slab_lock operation, in order to make the slab allocator safe
 *   to use in the context of an irq.
 *
 *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
 *   doesn't have to be revalidated in each section protected by the local lock.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
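 *
 * As a simplified, illustrative sketch (not the exact code; next_tid() is
 * the helper that advances a transaction id), the lockless allocation
 * fastpath described above amounts to:
 *
 *	object = c->freelist;
 *	tid = c->tid;
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     get_freepointer(s, object), next_tid(tid)))
 *		goto redo;
 *
 * If the thread was preempted or migrated to another cpu between reading
 * freelist/tid and the cmpxchg, the tid comparison fails and the operation
 * is retried on the now-current cpu.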
 *
 *   slab->frozen		The slab is frozen and exempt from list processing.
 *				This means that the slab is dedicated to a purpose
 *				such as satisfying allocations for a specific
 *				processor. Objects may be freed in the slab while
 *				it is frozen but slab_free will then skip the usual
 *				list operations. It is up to the processor holding
 *				the slab to integrate the slab into the slab lists
 *				when the slab is no longer needed.
 *
 *				One use of this flag is to mark slabs that are
 *				used for allocations. Then such a slab becomes a cpu
 *				slab. The cpu slab may be equipped with an additional
 *				freelist that allows lockless access to
 *				free objects in addition to the regular freelist
 *				that requires the slab lock.
 *
 *   SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 *				options set. This moves slab handling out of
 *				the fast path and disables lockless freelists.
 */

/*
 * We could simply use migrate_disable()/enable() but as long as it's a
 * function call even on !PREEMPT_RT, use inline preempt_disable() there.
 */
#ifndef CONFIG_PREEMPT_RT
#define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
#define USE_LOCKLESS_FAST_PATH()	(true)
#else
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
#define USE_LOCKLESS_FAST_PATH()	(false)
#endif

#ifndef CONFIG_SLUB_TINY
#define __fastpath_inline __always_inline
#else
#define __fastpath_inline
#endif

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif		/* CONFIG_SLUB_DEBUG */

/* Structure holding parameters for get_partial() call chain */
struct partial_context {
	gfp_t flags;
	unsigned int orig_size;
	void *object;
};

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

static inline bool slub_debug_orig_size(struct kmem_cache *s)
{
	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
			(s->flags & SLAB_KMALLOC));
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

#ifndef CONFIG_SLUB_TINY
/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10
#else
#define MIN_PARTIAL 0
#define MAX_PARTIAL 0
#endif

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slab_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
/* Use cmpxchg_double */

#ifdef system_has_freelist_aba
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
#else
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef SLAB_SUPPORTS_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS
};
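
/*
 * When CONFIG_SLUB_STATS is enabled, each counter above is exported as a
 * per-cache sysfs attribute. As an illustrative example (the values are
 * made up and vary per system; the total is followed by nonzero per-cpu
 * counts):
 *
 *	# cat /sys/kernel/slab/kmalloc-64/alloc_fastpath
 *	1098125 C0=249057 C1=269479 C2=291726 C3=287863
 */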

#ifndef CONFIG_SLUB_TINY
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	union {
		struct {
			void **freelist;	/* Pointer to next available object */
			unsigned long tid;	/* Globally unique transaction id */
		};
		freelist_aba_t freelist_tid;
	};
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
};
#endif /* CONFIG_SLUB_TINY */

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

static inline
void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_add(s->cpu_slab->stat[si], v);
#endif
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

#ifndef CONFIG_SLUB_TINY
/*
 * Workqueue used for flush_cpu_slab().
 */
static struct workqueue_struct *flushwq;
#endif

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * freeptr_t represents a SLUB freelist pointer, which might be encoded
 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */
typedef struct { unsigned long v; } freeptr_t;

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
					    void *ptr, unsigned long ptr_addr)
{
	unsigned long encoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
#else
	encoded = (unsigned long)ptr;
#endif
	return (freeptr_t){.v = encoded};
}

static inline void *freelist_ptr_decode(const struct kmem_cache *s,
					freeptr_t ptr, unsigned long ptr_addr)
{
	void *decoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
#else
	decoded = (void *)ptr.v;
#endif
	return decoded;
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	unsigned long ptr_addr;
	freeptr_t p;

	object = kasan_reset_tag(object);
	ptr_addr = (unsigned long)object + s->offset;
	p = *(freeptr_t *)(ptr_addr);
	return freelist_ptr_decode(s, p, ptr_addr);
}

#ifndef CONFIG_SLUB_TINY
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetchw(object + s->offset);
}
#endif

/*
 * When running under KMSAN, get_freepointer_safe() may return an uninitialized
 * pointer value in the case the current thread loses the race for the next
 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
 * slab_alloc_node() will fail, so the uninitialized value won't be used, but
 * KMSAN will still check all arguments of cmpxchg because of imperfect
 * handling of inline assembly.
 * To work around this problem, we apply __no_kmsan_checks to ensure that
 * get_freepointer_safe() returns initialized memory.
 */
__no_kmsan_checks
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	freeptr_t p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
	return freelist_ptr_decode(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
}
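
/*
 * Illustrative sketch of the hardening scheme above (not additional kernel
 * code): with CONFIG_SLAB_FREELIST_HARDENED, a freelist pointer stored at
 * address A with per-cache secret R is kept as
 *
 *	stored = ptr ^ R ^ swab(A)
 *
 * and recovered by applying the same XORs again, since
 * (ptr ^ R ^ swab(A)) ^ R ^ swab(A) == ptr. Byte-swapping A mixes its
 * unpredictable high bytes over the low bytes of the pointer, so leaking
 * one stored value does not let an attacker trivially forge freelist
 * entries at other locations.
 */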
/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						      unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
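
/*
 * Worked example for the oo_* helpers above (illustrative, assuming 4 KiB
 * pages): for a cache with size = 256 using order-1 slabs,
 *
 *	order_objects(1, 256)	= (4096 << 1) / 256 = 32
 *	oo_make(1, 256).x	= (1 << 16) + 32    = 0x10020
 *	oo_order(x)		= 0x10020 >> 16     = 1
 *	oo_objects(x)		= 0x10020 & 0xffff  = 32
 *
 * i.e. the slab order lives in the bits above OO_SHIFT and the object
 * count in the low OO_SHIFT bits of the same word.
 */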
#ifdef CONFIG_SLUB_CPU_PARTIAL
static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
	unsigned int nr_slabs;

	s->cpu_partial = nr_objects;

	/*
	 * We take the number of objects but actually limit the number of
	 * slabs on the per cpu partial list, in order to limit excessive
	 * growth of the list. For simplicity we assume that the slabs will
	 * be half-full.
	 */
	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
	s->cpu_partial_slabs = nr_slabs;
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return s->cpu_partial_slabs;
}
#else
static inline void
slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct slab *slab)
{
	bit_spin_lock(PG_locked, &slab->__page_flags);
}

static __always_inline void slab_unlock(struct slab *slab)
{
	bit_spin_unlock(PG_locked, &slab->__page_flags);
}

static inline bool
__update_freelist_fast(struct slab *slab,
		       void *freelist_old, unsigned long counters_old,
		       void *freelist_new, unsigned long counters_new)
{
#ifdef system_has_freelist_aba
	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };

	return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
#else
	return false;
#endif
}

static inline bool
__update_freelist_slow(struct slab *slab,
		       void *freelist_old, unsigned long counters_old,
		       void *freelist_new, unsigned long counters_new)
{
	bool ret = false;

	slab_lock(slab);
	if (slab->freelist == freelist_old &&
	    slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		ret = true;
	}
	slab_unlock(slab);

	return ret;
}

/*
 * Interrupts must be disabled (for the fallback code to work right), typically
 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
 * part of bit_spin_lock(), is sufficient because the policy is not to allow any
 * allocation/free operation in hardirq context. Therefore nothing can interrupt
 * the operation.
 */
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	bool ret;

	if (USE_LOCKLESS_FAST_PATH())
		lockdep_assert_irqs_disabled();

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	} else {
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	bool ret;

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
		local_irq_restore(flags);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
		       struct slab *slab)
{
	void *addr = slab_address(slab);
	void *p;

	bitmap_zero(obj_map, slab->objects);

	for (p = slab->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), obj_map);
}
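
/*
 * Illustrative example for __fill_map() above (not additional kernel code):
 * for a slab with four objects whose freelist currently links the objects
 * at indices 1 and 3, the resulting obj_map has bits 1 and 3 set (0b1010),
 * i.e. a set bit means "object is free". Debug code walking the slab then
 * treats clear bits as allocated objects.
 */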
#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct slab *slab, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = slab_address(slab);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + slab->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
			16, 1, kasan_reset_tag((void *)addr), length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

#ifdef CONFIG_STACKDEPOT
static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t handle;
	unsigned long entries[TRACK_ADDRS_COUNT];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return handle;
}
#else
static inline depot_stack_handle_t set_track_prepare(void)
{
	return 0;
}
#endif

static void set_track_update(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr,
			     depot_stack_handle_t handle)
{
	struct track *p = get_track(s, object, alloc);

#ifdef CONFIG_STACKDEPOT
	p->handle = handle;
#endif
	p->addr = addr;
	p->cpu = smp_processor_id();
	p->pid = current->pid;
	p->when = jiffies;
}

static __always_inline void set_track(struct kmem_cache *s, void *object,
				      enum track_item alloc, unsigned long addr)
{
	depot_stack_handle_t handle = set_track_prepare();

	set_track_update(s, object, alloc, addr, handle);
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	struct track *p;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	p = get_track(s, object, TRACK_ALLOC);
	memset(p, 0, 2*sizeof(struct track));
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	depot_stack_handle_t handle __maybe_unused;

	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKDEPOT
	handle = READ_ONCE(t->handle);
	if (handle)
		stack_depot_print(handle);
	else
		pr_err("object allocation/free stack trace missing\n");
#endif
}
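
/*
 * As an illustrative example (the values are made up), print_track() above
 * emits a line of the form:
 *
 *	Allocated in kmem_cache_alloc+0x1a2/0x300 age=4300 cpu=2 pid=1234
 *
 * followed by the saved stack trace when CONFIG_STACKDEPOT is available.
 */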
print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); 961 } 962 963 static void print_slab_info(const struct slab *slab) 964 { 965 struct folio *folio = (struct folio *)slab_folio(slab); 966 967 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n", 968 slab, slab->objects, slab->inuse, slab->freelist, 969 folio_flags(folio, 0)); 970 } 971 972 /* 973 * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API 974 * family will round up the real request size to these fixed ones, so 975 * there could be an extra area than what is requested. Save the original 976 * request size in the meta data area, for better debug and sanity check. 977 */ 978 static inline void set_orig_size(struct kmem_cache *s, 979 void *object, unsigned int orig_size) 980 { 981 void *p = kasan_reset_tag(object); 982 unsigned int kasan_meta_size; 983 984 if (!slub_debug_orig_size(s)) 985 return; 986 987 /* 988 * KASAN can save its free meta data inside of the object at offset 0. 989 * If this meta data size is larger than 'orig_size', it will overlap 990 * the data redzone in [orig_size+1, object_size]. Thus, we adjust 991 * 'orig_size' to be as at least as big as KASAN's meta data. 992 */ 993 kasan_meta_size = kasan_metadata_size(s, true); 994 if (kasan_meta_size > orig_size) 995 orig_size = kasan_meta_size; 996 997 p += get_info_end(s); 998 p += sizeof(struct track) * 2; 999 1000 *(unsigned int *)p = orig_size; 1001 } 1002 1003 static inline unsigned int get_orig_size(struct kmem_cache *s, void *object) 1004 { 1005 void *p = kasan_reset_tag(object); 1006 1007 if (!slub_debug_orig_size(s)) 1008 return s->object_size; 1009 1010 p += get_info_end(s); 1011 p += sizeof(struct track) * 2; 1012 1013 return *(unsigned int *)p; 1014 } 1015 1016 void skip_orig_size_check(struct kmem_cache *s, const void *object) 1017 { 1018 set_orig_size(s, (void *)object, s->object_size); 1019 } 1020 1021 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 1022 { 1023 struct va_format vaf; 1024 va_list args; 1025 1026 va_start(args, fmt); 1027 vaf.fmt = fmt; 1028 vaf.va = &args; 1029 pr_err("=============================================================================\n"); 1030 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); 1031 pr_err("-----------------------------------------------------------------------------\n\n"); 1032 va_end(args); 1033 } 1034 1035 __printf(2, 3) 1036 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 
__printf(2, 3)
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = slab_address(slab);

	print_tracking(s, p);

	print_slab_info(slab);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			      s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (slub_debug_orig_size(s))
		off += sizeof(unsigned int);

	off += kasan_metadata_size(s, false);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

static void object_err(struct kmem_cache *s, struct slab *slab,
			u8 *object, char *reason)
{
	if (slab_add_kunit_errors())
		return;

	slab_bug(s, "%s", reason);
	print_trailer(s, slab, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, slab, nextfree) && freelist) {
		object_err(s, slab, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_slab_info(slab);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);
	unsigned int poison_size = s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		memset(p - s->red_left_pad, val, s->red_left_pad);

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			/*
			 * Redzone the space that kmalloc allocated beyond the
			 * requested size, and limit the poison size to the
			 * original request size accordingly.
			 */
			poison_size = get_orig_size(s, object);
		}
	}

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, poison_size - 1);
		p[poison_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + poison_size, val, s->inuse - poison_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
				  u8 *object, char *what,
				  u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = slab_address(slab);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	if (slab_add_kunit_errors())
		goto skip_bug_print;

	slab_bug(s, "%s overwritten", what);
	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault - addr,
					fault[0], value);
	print_trailer(s, slab, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

skip_bug_print:
	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 *	Bytes of the object to be managed.
 *	If the freepointer may overlay the object then the free
 *	pointer is in the middle of the object.
 *
 *	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *	0xa5 (POISON_END)
 *
 * object + s->object_size
 *	Padding to reach word boundary. This is also used for Redzoning.
 *	Padding is extended by another word if Redzoning is enabled and
 *	object_size == inuse.
 *
 *	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *	Meta data starts here.
 *
 *	A. Free pointer (if we cannot overwrite object on free)
 *	B. Tracking data for SLAB_STORE_USER
 *	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
 *	D. Padding to reach required alignment boundary or at minimum
 *		one word if debugging is on to be able to detect writes
 *		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
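
/*
 * Illustrative sketch of the layout above for a fully debugged cache
 * (e.g. slab_debug=FZPU on a 64-bit kernel; exact offsets depend on the
 * configuration and architecture):
 *
 *	[left redzone][object (poisoned while free)][right redzone]
 *	[free pointer][track alloc][track free][orig_size*][padding]
 *
 * (*) orig_size is present for kmalloc caches only. The free pointer is
 * moved behind the object here because poisoning covers the whole object
 * while it is free, so the pointer cannot live inside it.
 */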
static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER) {
		/* We also have user information there */
		off += 2 * sizeof(struct track);

		if (s->flags & SLAB_KMALLOC)
			off += sizeof(unsigned int);
	}

	off += kasan_metadata_size(s, false);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, slab, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static void slab_pad_check(struct kmem_cache *s, struct slab *slab)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return;

	start = slab_address(slab);
	length = slab_size(slab);
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
			fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
}

static int check_object(struct kmem_cache *s, struct slab *slab,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;
	unsigned int orig_size, kasan_meta_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			orig_size = get_orig_size(s, object);

			if (s->object_size > orig_size &&
			    !check_bytes_and_report(s, slab, object,
					"kmalloc Redzone", p + orig_size,
					val, s->object_size - orig_size)) {
				return 0;
			}
		}
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, slab, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
			/*
			 * KASAN can save its free meta data inside of the
			 * object at offset 0. Thus, skip checking the part of
			 * the redzone that overlaps with the meta data.
			 */
			kasan_meta_size = kasan_metadata_size(s, true);
			if (kasan_meta_size < s->object_size - 1 &&
			    !check_bytes_and_report(s, slab, p, "Poison",
					p + kasan_meta_size, POISON_FREE,
					s->object_size - kasan_meta_size - 1))
				return 0;
			if (kasan_meta_size < s->object_size &&
			    !check_bytes_and_report(s, slab, p, "End Poison",
					p + s->object_size - 1, POISON_END, 1))
				return 0;
		}
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, slab, p);
	}

	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, slab, get_freepointer(s, p))) {
		object_err(s, slab, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct slab *slab)
{
	int maxobj;

	if (!folio_test_slab(slab_folio(slab))) {
		slab_err(s, slab, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(slab_order(slab), s->size);
	if (slab->objects > maxobj) {
		slab_err(s, slab, "objects %u > max %u",
			slab->objects, maxobj);
		return 0;
	}
	if (slab->inuse > slab->objects) {
		slab_err(s, slab, "inuse %u > max %u",
			slab->inuse, slab->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, slab);
	return 1;
}

/*
 * Determine if a certain object in a slab is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = slab->freelist;
	while (fp && nr <= slab->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, slab, fp)) {
			if (object) {
				object_err(s, slab, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, slab, "Freepointer corrupt");
				slab->freelist = NULL;
				slab->inuse = slab->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(slab_order(slab), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (slab->objects != max_objects) {
		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
			 slab->objects, max_objects);
		slab->objects = max_objects;
		slab_fix(s, "Number of objects adjusted");
	}
	if (slab->inuse != slab->objects - nr) {
		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
			 slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;
		slab_fix(s, "Object count adjusted");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct slab *slab, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, slab->inuse,
			slab->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}
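
/*
 * With the trace option (slab_debug=T), trace() above logs every alloc and
 * free; an illustrative line (pointer values are made up) looks like:
 *
 *	TRACE kmalloc-64 alloc 0xffff888102a30c00 inuse=12 fp=0xffff888102a30c40
 *
 * followed by a stack dump, which makes the option far too verbose for
 * anything but targeted debugging of a single small cache.
 */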
/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&slab->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&slab->slab_list);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_inc(&n->nr_slabs);
	atomic_long_add(objects, &n->total_objects);
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
	metadata_access_disable();
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct slab *slab, void *object)
{
	if (!check_slab(s, slab))
		return 0;

	if (!check_valid_pointer(s, slab, object)) {
		object_err(s, slab, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}
static noinline bool alloc_debug_processing(struct kmem_cache *s,
			struct slab *slab, void *object, int orig_size)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, slab, object))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	trace(s, slab, object, 1);
	set_orig_size(s, object, orig_size);
	init_object(s, object, SLUB_RED_ACTIVE);
	return true;

bad:
	if (folio_test_slab(slab_folio(slab))) {
		/*
		 * If this is a slab page then let's do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		slab->inuse = slab->objects;
		slab->freelist = NULL;
	}
	return false;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct slab *slab, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, slab, object)) {
		slab_err(s, slab, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, slab, object)) {
		object_err(s, slab, object, "Object already free");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != slab->slab_cache)) {
		if (!folio_test_slab(slab_folio(slab))) {
			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!slab->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, slab, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}
skipped\n", *str); 1677 } 1678 } 1679 check_slabs: 1680 if (*str == ',') 1681 *slabs = ++str; 1682 else 1683 *slabs = NULL; 1684 1685 /* Skip over the slab list */ 1686 while (*str && *str != ';') 1687 str++; 1688 1689 /* Skip any completely empty blocks */ 1690 while (*str && *str == ';') 1691 str++; 1692 1693 if (init && higher_order_disable) 1694 disable_higher_order_debug = 1; 1695 1696 if (*str) 1697 return str; 1698 else 1699 return NULL; 1700 } 1701 1702 static int __init setup_slub_debug(char *str) 1703 { 1704 slab_flags_t flags; 1705 slab_flags_t global_flags; 1706 char *saved_str; 1707 char *slab_list; 1708 bool global_slub_debug_changed = false; 1709 bool slab_list_specified = false; 1710 1711 global_flags = DEBUG_DEFAULT_FLAGS; 1712 if (*str++ != '=' || !*str) 1713 /* 1714 * No options specified. Switch on full debugging. 1715 */ 1716 goto out; 1717 1718 saved_str = str; 1719 while (str) { 1720 str = parse_slub_debug_flags(str, &flags, &slab_list, true); 1721 1722 if (!slab_list) { 1723 global_flags = flags; 1724 global_slub_debug_changed = true; 1725 } else { 1726 slab_list_specified = true; 1727 if (flags & SLAB_STORE_USER) 1728 stack_depot_request_early_init(); 1729 } 1730 } 1731 1732 /* 1733 * For backwards compatibility, a single list of flags with list of 1734 * slabs means debugging is only changed for those slabs, so the global 1735 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending 1736 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as 1737 * long as there is no option specifying flags without a slab list. 1738 */ 1739 if (slab_list_specified) { 1740 if (!global_slub_debug_changed) 1741 global_flags = slub_debug; 1742 slub_debug_string = saved_str; 1743 } 1744 out: 1745 slub_debug = global_flags; 1746 if (slub_debug & SLAB_STORE_USER) 1747 stack_depot_request_early_init(); 1748 if (slub_debug != 0 || slub_debug_string) 1749 static_branch_enable(&slub_debug_enabled); 1750 else 1751 static_branch_disable(&slub_debug_enabled); 1752 if ((static_branch_unlikely(&init_on_alloc) || 1753 static_branch_unlikely(&init_on_free)) && 1754 (slub_debug & SLAB_POISON)) 1755 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); 1756 return 1; 1757 } 1758 1759 __setup("slab_debug", setup_slub_debug); 1760 __setup_param("slub_debug", slub_debug, setup_slub_debug, 0); 1761 1762 /* 1763 * kmem_cache_flags - apply debugging options to the cache 1764 * @flags: flags to set 1765 * @name: name of the cache 1766 * 1767 * Debug option(s) are applied to @flags. In addition to the debug 1768 * option(s), if a slab name (or multiple) is specified i.e. 1769 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ... 1770 * then only the select slabs will receive the debug option(s). 1771 */ 1772 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) 1773 { 1774 char *iter; 1775 size_t len; 1776 char *next_block; 1777 slab_flags_t block_flags; 1778 slab_flags_t slub_debug_local = slub_debug; 1779 1780 if (flags & SLAB_NO_USER_FLAGS) 1781 return flags; 1782 1783 /* 1784 * If the slab cache is for debugging (e.g. kmemleak) then 1785 * don't store user (stack trace) information by default, 1786 * but let the user enable it via the command line below. 
/*
 * kmem_cache_flags - apply debugging options to the cache
 * @flags:	flags to set
 * @name:	name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the select slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	if (flags & SLAB_NO_USER_FLAGS)
		return flags;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		slub_debug_local &= ~SLAB_STORE_USER;

	len = strlen(name);
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			end = strchrnul(iter, ',');
			if (next_block && next_block < end)
				end = next_block - 1;

			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				cmplen = max_t(size_t, len, (end - iter));

			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}

			if (!*end || *end == ';')
				break;
			iter = end + 1;
		}
	}

	return flags | slub_debug_local;
}
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
static inline
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}

static inline bool alloc_debug_processing(struct kmem_cache *s,
	struct slab *slab, void *object, int orig_size) { return true; }

static inline bool free_debug_processing(struct kmem_cache *s,
	struct slab *slab, void *head, void *tail, int *bulk_cnt,
	unsigned long addr, depot_stack_handle_t handle) { return true; }

static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
static inline int check_object(struct kmem_cache *s, struct slab *slab,
			void *object, u8 val) { return 1; }
static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
static inline void set_track(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr) {}
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct slab *slab) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct slab *slab) {}
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

#ifndef CONFIG_SLUB_TINY
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	return false;
}
#endif
#endif /* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_SLAB_OBJ_EXT

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
{
	struct slabobj_ext *slab_exts;
	struct slab *obj_exts_slab;

	obj_exts_slab = virt_to_slab(obj_exts);
	slab_exts = slab_obj_exts(obj_exts_slab);
	if (slab_exts) {
		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
						 obj_exts_slab, obj_exts);
		/* codetag should be NULL */
		WARN_ON(slab_exts[offs].ref.ct);
		set_codetag_empty(&slab_exts[offs].ref);
	}
}
slab->obj_exts = OBJEXTS_ALLOC_FAIL; 1895 } 1896 1897 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 1898 struct slabobj_ext *vec, unsigned int objects) 1899 { 1900 /* 1901 * If vector previously failed to allocate then we have live 1902 * objects with no tag reference. Mark all references in this 1903 * vector as empty to avoid warnings later on. 1904 */ 1905 if (obj_exts & OBJEXTS_ALLOC_FAIL) { 1906 unsigned int i; 1907 1908 for (i = 0; i < objects; i++) 1909 set_codetag_empty(&vec[i].ref); 1910 } 1911 } 1912 1913 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 1914 1915 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} 1916 static inline void mark_failed_objexts_alloc(struct slab *slab) {} 1917 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 1918 struct slabobj_ext *vec, unsigned int objects) {} 1919 1920 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 1921 1922 /* 1923 * The allocated objcg pointers array is not accounted directly. 1924 * Moreover, it should not come from DMA buffer and is not readily 1925 * reclaimable. So those GFP bits should be masked off. 1926 */ 1927 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ 1928 __GFP_ACCOUNT | __GFP_NOFAIL) 1929 1930 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 1931 gfp_t gfp, bool new_slab) 1932 { 1933 unsigned int objects = objs_per_slab(s, slab); 1934 unsigned long new_exts; 1935 unsigned long old_exts; 1936 struct slabobj_ext *vec; 1937 1938 gfp &= ~OBJCGS_CLEAR_MASK; 1939 /* Prevent recursive extension vector allocation */ 1940 gfp |= __GFP_NO_OBJ_EXT; 1941 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, 1942 slab_nid(slab)); 1943 if (!vec) { 1944 /* Mark vectors which failed to allocate */ 1945 if (new_slab) 1946 mark_failed_objexts_alloc(slab); 1947 1948 return -ENOMEM; 1949 } 1950 1951 new_exts = (unsigned long)vec; 1952 #ifdef CONFIG_MEMCG 1953 new_exts |= MEMCG_DATA_OBJEXTS; 1954 #endif 1955 old_exts = READ_ONCE(slab->obj_exts); 1956 handle_failed_objexts_alloc(old_exts, vec, objects); 1957 if (new_slab) { 1958 /* 1959 * If the slab is brand new and nobody can yet access its 1960 * obj_exts, no synchronization is required and obj_exts can 1961 * be simply assigned. 1962 */ 1963 slab->obj_exts = new_exts; 1964 } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) || 1965 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { 1966 /* 1967 * If the slab is already in use, somebody can allocate and 1968 * assign slabobj_exts in parallel. In this case the existing 1969 * objcg vector should be reused. 1970 */ 1971 mark_objexts_empty(vec); 1972 kfree(vec); 1973 return 0; 1974 } 1975 1976 kmemleak_not_leak(vec); 1977 return 0; 1978 } 1979 1980 static inline void free_slab_obj_exts(struct slab *slab) 1981 { 1982 struct slabobj_ext *obj_exts; 1983 1984 obj_exts = slab_obj_exts(slab); 1985 if (!obj_exts) 1986 return; 1987 1988 /* 1989 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its 1990 * corresponding extension will be NULL. alloc_tag_sub() will throw a 1991 * warning if slab has extensions but the extension of an object is 1992 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that 1993 * the extension for obj_exts is expected to be NULL. 
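 * (When CONFIG_MEM_ALLOC_PROFILING_DEBUG is not set, mark_objexts_empty()
 * above is an empty stub, so the call below compiles away.)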
1994 */ 1995 mark_objexts_empty(obj_exts); 1996 kfree(obj_exts); 1997 slab->obj_exts = 0; 1998 } 1999 2000 static inline bool need_slab_obj_ext(void) 2001 { 2002 if (mem_alloc_profiling_enabled()) 2003 return true; 2004 2005 /* 2006 * CONFIG_MEMCG_KMEM creates vector of obj_cgroup objects conditionally 2007 * inside memcg_slab_post_alloc_hook. No other users for now. 2008 */ 2009 return false; 2010 } 2011 2012 static inline struct slabobj_ext * 2013 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 2014 { 2015 struct slab *slab; 2016 2017 if (!p) 2018 return NULL; 2019 2020 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2021 return NULL; 2022 2023 if (flags & __GFP_NO_OBJ_EXT) 2024 return NULL; 2025 2026 slab = virt_to_slab(p); 2027 if (!slab_obj_exts(slab) && 2028 WARN(alloc_slab_obj_exts(slab, s, flags, false), 2029 "%s, %s: Failed to create slab extension vector!\n", 2030 __func__, s->name)) 2031 return NULL; 2032 2033 return slab_obj_exts(slab) + obj_to_index(s, slab, p); 2034 } 2035 2036 static inline void 2037 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2038 int objects) 2039 { 2040 #ifdef CONFIG_MEM_ALLOC_PROFILING 2041 struct slabobj_ext *obj_exts; 2042 int i; 2043 2044 if (!mem_alloc_profiling_enabled()) 2045 return; 2046 2047 obj_exts = slab_obj_exts(slab); 2048 if (!obj_exts) 2049 return; 2050 2051 for (i = 0; i < objects; i++) { 2052 unsigned int off = obj_to_index(s, slab, p[i]); 2053 2054 alloc_tag_sub(&obj_exts[off].ref, s->size); 2055 } 2056 #endif 2057 } 2058 2059 #else /* CONFIG_SLAB_OBJ_EXT */ 2060 2061 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 2062 gfp_t gfp, bool new_slab) 2063 { 2064 return 0; 2065 } 2066 2067 static inline void free_slab_obj_exts(struct slab *slab) 2068 { 2069 } 2070 2071 static inline bool need_slab_obj_ext(void) 2072 { 2073 return false; 2074 } 2075 2076 static inline struct slabobj_ext * 2077 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 2078 { 2079 return NULL; 2080 } 2081 2082 static inline void 2083 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2084 int objects) 2085 { 2086 } 2087 2088 #endif /* CONFIG_SLAB_OBJ_EXT */ 2089 2090 #ifdef CONFIG_MEMCG_KMEM 2091 2092 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object); 2093 2094 static __fastpath_inline 2095 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 2096 gfp_t flags, size_t size, void **p) 2097 { 2098 if (likely(!memcg_kmem_online())) 2099 return true; 2100 2101 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) 2102 return true; 2103 2104 if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p))) 2105 return true; 2106 2107 if (likely(size == 1)) { 2108 memcg_alloc_abort_single(s, *p); 2109 *p = NULL; 2110 } else { 2111 kmem_cache_free_bulk(s, size, p); 2112 } 2113 2114 return false; 2115 } 2116 2117 static __fastpath_inline 2118 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2119 int objects) 2120 { 2121 struct slabobj_ext *obj_exts; 2122 2123 if (!memcg_kmem_online()) 2124 return; 2125 2126 obj_exts = slab_obj_exts(slab); 2127 if (likely(!obj_exts)) 2128 return; 2129 2130 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); 2131 } 2132 #else /* CONFIG_MEMCG_KMEM */ 2133 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s, 2134 struct list_lru *lru, 2135 gfp_t flags, size_t size, 2136 void **p) 2137 { 2138 return true; 2139 
} 2140 2141 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 2142 void **p, int objects) 2143 { 2144 } 2145 #endif /* CONFIG_MEMCG_KMEM */ 2146 2147 /* 2148 * Hooks for other subsystems that check memory allocations. In a typical 2149 * production configuration these hooks all should produce no code at all. 2150 * 2151 * Returns true if freeing of the object can proceed, false if its reuse 2152 * was delayed by KASAN quarantine, or it was returned to KFENCE. 2153 */ 2154 static __always_inline 2155 bool slab_free_hook(struct kmem_cache *s, void *x, bool init) 2156 { 2157 kmemleak_free_recursive(x, s->flags); 2158 kmsan_slab_free(s, x); 2159 2160 debug_check_no_locks_freed(x, s->object_size); 2161 2162 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 2163 debug_check_no_obj_freed(x, s->object_size); 2164 2165 /* Use KCSAN to help debug racy use-after-free. */ 2166 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) 2167 __kcsan_check_access(x, s->object_size, 2168 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 2169 2170 if (kfence_free(x)) 2171 return false; 2172 2173 /* 2174 * As memory initialization might be integrated into KASAN, 2175 * kasan_slab_free and initialization memset's must be 2176 * kept together to avoid discrepancies in behavior. 2177 * 2178 * The initialization memset's clear the object and the metadata, 2179 * but don't touch the SLAB redzone. 2180 * 2181 * The object's freepointer is also avoided if stored outside the 2182 * object. 2183 */ 2184 if (unlikely(init)) { 2185 int rsize; 2186 unsigned int inuse; 2187 2188 inuse = get_info_end(s); 2189 if (!kasan_has_integrated_init()) 2190 memset(kasan_reset_tag(x), 0, s->object_size); 2191 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 2192 memset((char *)kasan_reset_tag(x) + inuse, 0, 2193 s->size - inuse - rsize); 2194 } 2195 /* KASAN might put x into memory quarantine, delaying its reuse. */ 2196 return !kasan_slab_free(s, x, init); 2197 } 2198 2199 static __fastpath_inline 2200 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail, 2201 int *cnt) 2202 { 2203 2204 void *object; 2205 void *next = *head; 2206 void *old_tail = *tail; 2207 bool init; 2208 2209 if (is_kfence_address(next)) { 2210 slab_free_hook(s, next, false); 2211 return false; 2212 } 2213 2214 /* Head and tail of the reconstructed freelist */ 2215 *head = NULL; 2216 *tail = NULL; 2217 2218 init = slab_want_init_on_free(s); 2219 2220 do { 2221 object = next; 2222 next = get_freepointer(s, object); 2223 2224 /* If object's reuse doesn't have to be delayed */ 2225 if (likely(slab_free_hook(s, object, init))) { 2226 /* Move object to the new freelist */ 2227 set_freepointer(s, object, *head); 2228 *head = object; 2229 if (!*tail) 2230 *tail = object; 2231 } else { 2232 /* 2233 * Adjust the reconstructed freelist depth 2234 * accordingly if object's reuse is delayed. 
2235 */ 2236 --(*cnt); 2237 } 2238 } while (object != old_tail); 2239 2240 return *head != NULL; 2241 } 2242 2243 static void *setup_object(struct kmem_cache *s, void *object) 2244 { 2245 setup_object_debug(s, object); 2246 object = kasan_init_slab_obj(s, object); 2247 if (unlikely(s->ctor)) { 2248 kasan_unpoison_new_object(s, object); 2249 s->ctor(object); 2250 kasan_poison_new_object(s, object); 2251 } 2252 return object; 2253 } 2254 2255 /* 2256 * Slab allocation and freeing 2257 */ 2258 static inline struct slab *alloc_slab_page(gfp_t flags, int node, 2259 struct kmem_cache_order_objects oo) 2260 { 2261 struct folio *folio; 2262 struct slab *slab; 2263 unsigned int order = oo_order(oo); 2264 2265 folio = (struct folio *)alloc_pages_node(node, flags, order); 2266 if (!folio) 2267 return NULL; 2268 2269 slab = folio_slab(folio); 2270 __folio_set_slab(folio); 2271 /* Make the flag visible before any changes to folio->mapping */ 2272 smp_wmb(); 2273 if (folio_is_pfmemalloc(folio)) 2274 slab_set_pfmemalloc(slab); 2275 2276 return slab; 2277 } 2278 2279 #ifdef CONFIG_SLAB_FREELIST_RANDOM 2280 /* Pre-initialize the random sequence cache */ 2281 static int init_cache_random_seq(struct kmem_cache *s) 2282 { 2283 unsigned int count = oo_objects(s->oo); 2284 int err; 2285 2286 /* Bailout if already initialised */ 2287 if (s->random_seq) 2288 return 0; 2289 2290 err = cache_random_seq_create(s, count, GFP_KERNEL); 2291 if (err) { 2292 pr_err("SLUB: Unable to initialize free list for %s\n", 2293 s->name); 2294 return err; 2295 } 2296 2297 /* Transform to an offset on the set of pages */ 2298 if (s->random_seq) { 2299 unsigned int i; 2300 2301 for (i = 0; i < count; i++) 2302 s->random_seq[i] *= s->size; 2303 } 2304 return 0; 2305 } 2306 2307 /* Initialize each random sequence freelist per cache */ 2308 static void __init init_freelist_randomization(void) 2309 { 2310 struct kmem_cache *s; 2311 2312 mutex_lock(&slab_mutex); 2313 2314 list_for_each_entry(s, &slab_caches, list) 2315 init_cache_random_seq(s); 2316 2317 mutex_unlock(&slab_mutex); 2318 } 2319 2320 /* Get the next entry on the pre-computed freelist randomized */ 2321 static void *next_freelist_entry(struct kmem_cache *s, 2322 unsigned long *pos, void *start, 2323 unsigned long page_limit, 2324 unsigned long freelist_count) 2325 { 2326 unsigned int idx; 2327 2328 /* 2329 * If the target page allocation failed, the number of objects on the 2330 * page might be smaller than the usual size defined by the cache. 
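 * A worked example with hypothetical numbers: for s->size == 256 and a
 * fallback slab holding 16 of the usual 32 objects, page_limit is
 * 16 * 256 == 4096, so any pre-computed offset of 4096 or more is
 * skipped by the loop below.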
2331 */ 2332 do { 2333 idx = s->random_seq[*pos]; 2334 *pos += 1; 2335 if (*pos >= freelist_count) 2336 *pos = 0; 2337 } while (unlikely(idx >= page_limit)); 2338 2339 return (char *)start + idx; 2340 } 2341 2342 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 2343 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2344 { 2345 void *start; 2346 void *cur; 2347 void *next; 2348 unsigned long idx, pos, page_limit, freelist_count; 2349 2350 if (slab->objects < 2 || !s->random_seq) 2351 return false; 2352 2353 freelist_count = oo_objects(s->oo); 2354 pos = get_random_u32_below(freelist_count); 2355 2356 page_limit = slab->objects * s->size; 2357 start = fixup_red_left(s, slab_address(slab)); 2358 2359 /* First entry is used as the base of the freelist */ 2360 cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count); 2361 cur = setup_object(s, cur); 2362 slab->freelist = cur; 2363 2364 for (idx = 1; idx < slab->objects; idx++) { 2365 next = next_freelist_entry(s, &pos, start, page_limit, 2366 freelist_count); 2367 next = setup_object(s, next); 2368 set_freepointer(s, cur, next); 2369 cur = next; 2370 } 2371 set_freepointer(s, cur, NULL); 2372 2373 return true; 2374 } 2375 #else 2376 static inline int init_cache_random_seq(struct kmem_cache *s) 2377 { 2378 return 0; 2379 } 2380 static inline void init_freelist_randomization(void) { } 2381 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2382 { 2383 return false; 2384 } 2385 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 2386 2387 static __always_inline void account_slab(struct slab *slab, int order, 2388 struct kmem_cache *s, gfp_t gfp) 2389 { 2390 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) 2391 alloc_slab_obj_exts(slab, s, gfp, true); 2392 2393 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2394 PAGE_SIZE << order); 2395 } 2396 2397 static __always_inline void unaccount_slab(struct slab *slab, int order, 2398 struct kmem_cache *s) 2399 { 2400 if (memcg_kmem_online() || need_slab_obj_ext()) 2401 free_slab_obj_exts(slab); 2402 2403 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2404 -(PAGE_SIZE << order)); 2405 } 2406 2407 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 2408 { 2409 struct slab *slab; 2410 struct kmem_cache_order_objects oo = s->oo; 2411 gfp_t alloc_gfp; 2412 void *start, *p, *next; 2413 int idx; 2414 bool shuffle; 2415 2416 flags &= gfp_allowed_mask; 2417 2418 flags |= s->allocflags; 2419 2420 /* 2421 * Let the initial higher-order allocation fail under memory pressure 2422 * so we fall-back to the minimum order allocation. 2423 */ 2424 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 2425 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 2426 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; 2427 2428 slab = alloc_slab_page(alloc_gfp, node, oo); 2429 if (unlikely(!slab)) { 2430 oo = s->min; 2431 alloc_gfp = flags; 2432 /* 2433 * Allocation may have failed due to fragmentation. 
2434 * Try a lower order alloc if possible 2435 */ 2436 slab = alloc_slab_page(alloc_gfp, node, oo); 2437 if (unlikely(!slab)) 2438 return NULL; 2439 stat(s, ORDER_FALLBACK); 2440 } 2441 2442 slab->objects = oo_objects(oo); 2443 slab->inuse = 0; 2444 slab->frozen = 0; 2445 2446 account_slab(slab, oo_order(oo), s, flags); 2447 2448 slab->slab_cache = s; 2449 2450 kasan_poison_slab(slab); 2451 2452 start = slab_address(slab); 2453 2454 setup_slab_debug(s, slab, start); 2455 2456 shuffle = shuffle_freelist(s, slab); 2457 2458 if (!shuffle) { 2459 start = fixup_red_left(s, start); 2460 start = setup_object(s, start); 2461 slab->freelist = start; 2462 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 2463 next = p + s->size; 2464 next = setup_object(s, next); 2465 set_freepointer(s, p, next); 2466 p = next; 2467 } 2468 set_freepointer(s, p, NULL); 2469 } 2470 2471 return slab; 2472 } 2473 2474 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 2475 { 2476 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2477 flags = kmalloc_fix_flags(flags); 2478 2479 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2480 2481 return allocate_slab(s, 2482 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 2483 } 2484 2485 static void __free_slab(struct kmem_cache *s, struct slab *slab) 2486 { 2487 struct folio *folio = slab_folio(slab); 2488 int order = folio_order(folio); 2489 int pages = 1 << order; 2490 2491 __slab_clear_pfmemalloc(slab); 2492 folio->mapping = NULL; 2493 /* Make the mapping reset visible before clearing the flag */ 2494 smp_wmb(); 2495 __folio_clear_slab(folio); 2496 mm_account_reclaimed_pages(pages); 2497 unaccount_slab(slab, order, s); 2498 __free_pages(&folio->page, order); 2499 } 2500 2501 static void rcu_free_slab(struct rcu_head *h) 2502 { 2503 struct slab *slab = container_of(h, struct slab, rcu_head); 2504 2505 __free_slab(slab->slab_cache, slab); 2506 } 2507 2508 static void free_slab(struct kmem_cache *s, struct slab *slab) 2509 { 2510 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 2511 void *p; 2512 2513 slab_pad_check(s, slab); 2514 for_each_object(p, s, slab_address(slab), slab->objects) 2515 check_object(s, slab, p, SLUB_RED_INACTIVE); 2516 } 2517 2518 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) 2519 call_rcu(&slab->rcu_head, rcu_free_slab); 2520 else 2521 __free_slab(s, slab); 2522 } 2523 2524 static void discard_slab(struct kmem_cache *s, struct slab *slab) 2525 { 2526 dec_slabs_node(s, slab_nid(slab), slab->objects); 2527 free_slab(s, slab); 2528 } 2529 2530 /* 2531 * SLUB reuses PG_workingset bit to keep track of whether it's on 2532 * the per-node partial list. 2533 */ 2534 static inline bool slab_test_node_partial(const struct slab *slab) 2535 { 2536 return folio_test_workingset((struct folio *)slab_folio(slab)); 2537 } 2538 2539 static inline void slab_set_node_partial(struct slab *slab) 2540 { 2541 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2542 } 2543 2544 static inline void slab_clear_node_partial(struct slab *slab) 2545 { 2546 clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2547 } 2548 2549 /* 2550 * Management of partially allocated slabs. 
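 * add_partial() and remove_partial() below assert n->list_lock via
 * lockdep; __add_partial() is the raw variant for callers where the
 * lock is not (yet) required. Either way, the PG_workingset bit is
 * kept in sync with list membership via slab_set_node_partial() and
 * slab_clear_node_partial() above.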
2551 */ 2552 static inline void 2553 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 2554 { 2555 n->nr_partial++; 2556 if (tail == DEACTIVATE_TO_TAIL) 2557 list_add_tail(&slab->slab_list, &n->partial); 2558 else 2559 list_add(&slab->slab_list, &n->partial); 2560 slab_set_node_partial(slab); 2561 } 2562 2563 static inline void add_partial(struct kmem_cache_node *n, 2564 struct slab *slab, int tail) 2565 { 2566 lockdep_assert_held(&n->list_lock); 2567 __add_partial(n, slab, tail); 2568 } 2569 2570 static inline void remove_partial(struct kmem_cache_node *n, 2571 struct slab *slab) 2572 { 2573 lockdep_assert_held(&n->list_lock); 2574 list_del(&slab->slab_list); 2575 slab_clear_node_partial(slab); 2576 n->nr_partial--; 2577 } 2578 2579 /* 2580 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a 2581 * slab from the n->partial list. Remove only a single object from the slab, do 2582 * the alloc_debug_processing() checks and leave the slab on the list, or move 2583 * it to full list if it was the last free object. 2584 */ 2585 static void *alloc_single_from_partial(struct kmem_cache *s, 2586 struct kmem_cache_node *n, struct slab *slab, int orig_size) 2587 { 2588 void *object; 2589 2590 lockdep_assert_held(&n->list_lock); 2591 2592 object = slab->freelist; 2593 slab->freelist = get_freepointer(s, object); 2594 slab->inuse++; 2595 2596 if (!alloc_debug_processing(s, slab, object, orig_size)) { 2597 remove_partial(n, slab); 2598 return NULL; 2599 } 2600 2601 if (slab->inuse == slab->objects) { 2602 remove_partial(n, slab); 2603 add_full(s, n, slab); 2604 } 2605 2606 return object; 2607 } 2608 2609 /* 2610 * Called only for kmem_cache_debug() caches to allocate from a freshly 2611 * allocated slab. Allocate a single object instead of whole freelist 2612 * and put the slab to the partial (or full) list. 2613 */ 2614 static void *alloc_single_from_new_slab(struct kmem_cache *s, 2615 struct slab *slab, int orig_size) 2616 { 2617 int nid = slab_nid(slab); 2618 struct kmem_cache_node *n = get_node(s, nid); 2619 unsigned long flags; 2620 void *object; 2621 2622 2623 object = slab->freelist; 2624 slab->freelist = get_freepointer(s, object); 2625 slab->inuse = 1; 2626 2627 if (!alloc_debug_processing(s, slab, object, orig_size)) 2628 /* 2629 * It's not really expected that this would fail on a 2630 * freshly allocated slab, but a concurrent memory 2631 * corruption in theory could cause that. 2632 */ 2633 return NULL; 2634 2635 spin_lock_irqsave(&n->list_lock, flags); 2636 2637 if (slab->inuse == slab->objects) 2638 add_full(s, n, slab); 2639 else 2640 add_partial(n, slab, DEACTIVATE_TO_HEAD); 2641 2642 inc_slabs_node(s, nid, slab->objects); 2643 spin_unlock_irqrestore(&n->list_lock, flags); 2644 2645 return object; 2646 } 2647 2648 #ifdef CONFIG_SLUB_CPU_PARTIAL 2649 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 2650 #else 2651 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 2652 int drain) { } 2653 #endif 2654 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 2655 2656 /* 2657 * Try to allocate a partial slab from a specific node. 2658 */ 2659 static struct slab *get_partial_node(struct kmem_cache *s, 2660 struct kmem_cache_node *n, 2661 struct partial_context *pc) 2662 { 2663 struct slab *slab, *slab2, *partial = NULL; 2664 unsigned long flags; 2665 unsigned int partial_slabs = 0; 2666 2667 /* 2668 * Racy check. 
If we mistakenly see no partial slabs then we 2669 * just allocate an empty slab. If we mistakenly try to get a 2670 * partial slab and there is none available then get_partial() 2671 * will return NULL. 2672 */ 2673 if (!n || !n->nr_partial) 2674 return NULL; 2675 2676 spin_lock_irqsave(&n->list_lock, flags); 2677 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 2678 if (!pfmemalloc_match(slab, pc->flags)) 2679 continue; 2680 2681 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 2682 void *object = alloc_single_from_partial(s, n, slab, 2683 pc->orig_size); 2684 if (object) { 2685 partial = slab; 2686 pc->object = object; 2687 break; 2688 } 2689 continue; 2690 } 2691 2692 remove_partial(n, slab); 2693 2694 if (!partial) { 2695 partial = slab; 2696 stat(s, ALLOC_FROM_PARTIAL); 2697 2698 if ((slub_get_cpu_partial(s) == 0)) { 2699 break; 2700 } 2701 } else { 2702 put_cpu_partial(s, slab, 0); 2703 stat(s, CPU_PARTIAL_NODE); 2704 2705 if (++partial_slabs > slub_get_cpu_partial(s) / 2) { 2706 break; 2707 } 2708 } 2709 } 2710 spin_unlock_irqrestore(&n->list_lock, flags); 2711 return partial; 2712 } 2713 2714 /* 2715 * Get a slab from somewhere. Search in increasing NUMA distances. 2716 */ 2717 static struct slab *get_any_partial(struct kmem_cache *s, 2718 struct partial_context *pc) 2719 { 2720 #ifdef CONFIG_NUMA 2721 struct zonelist *zonelist; 2722 struct zoneref *z; 2723 struct zone *zone; 2724 enum zone_type highest_zoneidx = gfp_zone(pc->flags); 2725 struct slab *slab; 2726 unsigned int cpuset_mems_cookie; 2727 2728 /* 2729 * The defrag ratio allows a configuration of the tradeoffs between 2730 * inter node defragmentation and node local allocations. A lower 2731 * defrag_ratio increases the tendency to do local allocations 2732 * instead of attempting to obtain partial slabs from other nodes. 2733 * 2734 * If the defrag_ratio is set to 0 then kmalloc() always 2735 * returns node local objects. If the ratio is higher then kmalloc() 2736 * may return off node objects because partial slabs are obtained 2737 * from other nodes and filled up. 2738 * 2739 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2740 * (which makes defrag_ratio = 1000) then every (well almost) 2741 * allocation will first attempt to defrag slab caches on other nodes. 2742 * This means scanning over all nodes to look for partial slabs which 2743 * may be expensive if we do it every time we are trying to find a slab 2744 * with available objects. 2745 */ 2746 if (!s->remote_node_defrag_ratio || 2747 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2748 return NULL; 2749 2750 do { 2751 cpuset_mems_cookie = read_mems_allowed_begin(); 2752 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); 2753 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2754 struct kmem_cache_node *n; 2755 2756 n = get_node(s, zone_to_nid(zone)); 2757 2758 if (n && cpuset_zone_allowed(zone, pc->flags) && 2759 n->nr_partial > s->min_partial) { 2760 slab = get_partial_node(s, n, pc); 2761 if (slab) { 2762 /* 2763 * Don't check read_mems_allowed_retry() 2764 * here - if mems_allowed was updated in 2765 * parallel, that was a harmless race 2766 * between allocation and the cpuset 2767 * update 2768 */ 2769 return slab; 2770 } 2771 } 2772 } 2773 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2774 #endif /* CONFIG_NUMA */ 2775 return NULL; 2776 } 2777 2778 /* 2779 * Get a partial slab, lock it and return it. 
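 * The preferred node (or the local node for NUMA_NO_NODE) is tried
 * first; unless the caller pinned the allocation with __GFP_THISNODE,
 * the search then widens to other nodes in increasing NUMA distance
 * via get_any_partial().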
2780 */ 2781 static struct slab *get_partial(struct kmem_cache *s, int node, 2782 struct partial_context *pc) 2783 { 2784 struct slab *slab; 2785 int searchnode = node; 2786 2787 if (node == NUMA_NO_NODE) 2788 searchnode = numa_mem_id(); 2789 2790 slab = get_partial_node(s, get_node(s, searchnode), pc); 2791 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE))) 2792 return slab; 2793 2794 return get_any_partial(s, pc); 2795 } 2796 2797 #ifndef CONFIG_SLUB_TINY 2798 2799 #ifdef CONFIG_PREEMPTION 2800 /* 2801 * Calculate the next globally unique transaction for disambiguation 2802 * during cmpxchg. The transactions start with the cpu number and are then 2803 * incremented by CONFIG_NR_CPUS. 2804 */ 2805 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2806 #else 2807 /* 2808 * No preemption supported therefore also no need to check for 2809 * different cpus. 2810 */ 2811 #define TID_STEP 1 2812 #endif /* CONFIG_PREEMPTION */ 2813 2814 static inline unsigned long next_tid(unsigned long tid) 2815 { 2816 return tid + TID_STEP; 2817 } 2818 2819 #ifdef SLUB_DEBUG_CMPXCHG 2820 static inline unsigned int tid_to_cpu(unsigned long tid) 2821 { 2822 return tid % TID_STEP; 2823 } 2824 2825 static inline unsigned long tid_to_event(unsigned long tid) 2826 { 2827 return tid / TID_STEP; 2828 } 2829 #endif 2830 2831 static inline unsigned int init_tid(int cpu) 2832 { 2833 return cpu; 2834 } 2835 2836 static inline void note_cmpxchg_failure(const char *n, 2837 const struct kmem_cache *s, unsigned long tid) 2838 { 2839 #ifdef SLUB_DEBUG_CMPXCHG 2840 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2841 2842 pr_info("%s %s: cmpxchg redo ", n, s->name); 2843 2844 #ifdef CONFIG_PREEMPTION 2845 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2846 pr_warn("due to cpu change %d -> %d\n", 2847 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2848 else 2849 #endif 2850 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2851 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2852 tid_to_event(tid), tid_to_event(actual_tid)); 2853 else 2854 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2855 actual_tid, tid, next_tid(tid)); 2856 #endif 2857 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2858 } 2859 2860 static void init_kmem_cache_cpus(struct kmem_cache *s) 2861 { 2862 int cpu; 2863 struct kmem_cache_cpu *c; 2864 2865 for_each_possible_cpu(cpu) { 2866 c = per_cpu_ptr(s->cpu_slab, cpu); 2867 local_lock_init(&c->lock); 2868 c->tid = init_tid(cpu); 2869 } 2870 } 2871 2872 /* 2873 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist, 2874 * unfreezes the slabs and puts it on the proper list. 2875 * Assumes the slab has been already safely taken away from kmem_cache_cpu 2876 * by the caller. 2877 */ 2878 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, 2879 void *freelist) 2880 { 2881 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 2882 int free_delta = 0; 2883 void *nextfree, *freelist_iter, *freelist_tail; 2884 int tail = DEACTIVATE_TO_HEAD; 2885 unsigned long flags = 0; 2886 struct slab new; 2887 struct slab old; 2888 2889 if (READ_ONCE(slab->freelist)) { 2890 stat(s, DEACTIVATE_REMOTE_FREES); 2891 tail = DEACTIVATE_TO_TAIL; 2892 } 2893 2894 /* 2895 * Stage one: Count the objects on cpu's freelist as free_delta and 2896 * remember the last object in freelist_tail for later splicing. 
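 * As a sketch with hypothetical objects: given a cpu freelist A->B->C
 * and a slab freelist X->Y, stage two links C to X so the spliced slab
 * freelist becomes A->B->C->X->Y while new.inuse drops by
 * free_delta == 3.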
2897 */ 2898 freelist_tail = NULL; 2899 freelist_iter = freelist; 2900 while (freelist_iter) { 2901 nextfree = get_freepointer(s, freelist_iter); 2902 2903 /* 2904 * If 'nextfree' is invalid, it is possible that the object at 2905 * 'freelist_iter' is already corrupted. So isolate all objects 2906 * starting at 'freelist_iter' by skipping them. 2907 */ 2908 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) 2909 break; 2910 2911 freelist_tail = freelist_iter; 2912 free_delta++; 2913 2914 freelist_iter = nextfree; 2915 } 2916 2917 /* 2918 * Stage two: Unfreeze the slab while splicing the per-cpu 2919 * freelist to the head of slab's freelist. 2920 */ 2921 do { 2922 old.freelist = READ_ONCE(slab->freelist); 2923 old.counters = READ_ONCE(slab->counters); 2924 VM_BUG_ON(!old.frozen); 2925 2926 /* Determine target state of the slab */ 2927 new.counters = old.counters; 2928 new.frozen = 0; 2929 if (freelist_tail) { 2930 new.inuse -= free_delta; 2931 set_freepointer(s, freelist_tail, old.freelist); 2932 new.freelist = freelist; 2933 } else { 2934 new.freelist = old.freelist; 2935 } 2936 } while (!slab_update_freelist(s, slab, 2937 old.freelist, old.counters, 2938 new.freelist, new.counters, 2939 "unfreezing slab")); 2940 2941 /* 2942 * Stage three: Manipulate the slab list based on the updated state. 2943 */ 2944 if (!new.inuse && n->nr_partial >= s->min_partial) { 2945 stat(s, DEACTIVATE_EMPTY); 2946 discard_slab(s, slab); 2947 stat(s, FREE_SLAB); 2948 } else if (new.freelist) { 2949 spin_lock_irqsave(&n->list_lock, flags); 2950 add_partial(n, slab, tail); 2951 spin_unlock_irqrestore(&n->list_lock, flags); 2952 stat(s, tail); 2953 } else { 2954 stat(s, DEACTIVATE_FULL); 2955 } 2956 } 2957 2958 #ifdef CONFIG_SLUB_CPU_PARTIAL 2959 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) 2960 { 2961 struct kmem_cache_node *n = NULL, *n2 = NULL; 2962 struct slab *slab, *slab_to_discard = NULL; 2963 unsigned long flags = 0; 2964 2965 while (partial_slab) { 2966 slab = partial_slab; 2967 partial_slab = slab->next; 2968 2969 n2 = get_node(s, slab_nid(slab)); 2970 if (n != n2) { 2971 if (n) 2972 spin_unlock_irqrestore(&n->list_lock, flags); 2973 2974 n = n2; 2975 spin_lock_irqsave(&n->list_lock, flags); 2976 } 2977 2978 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { 2979 slab->next = slab_to_discard; 2980 slab_to_discard = slab; 2981 } else { 2982 add_partial(n, slab, DEACTIVATE_TO_TAIL); 2983 stat(s, FREE_ADD_PARTIAL); 2984 } 2985 } 2986 2987 if (n) 2988 spin_unlock_irqrestore(&n->list_lock, flags); 2989 2990 while (slab_to_discard) { 2991 slab = slab_to_discard; 2992 slab_to_discard = slab_to_discard->next; 2993 2994 stat(s, DEACTIVATE_EMPTY); 2995 discard_slab(s, slab); 2996 stat(s, FREE_SLAB); 2997 } 2998 } 2999 3000 /* 3001 * Put all the cpu partial slabs to the node partial list. 
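 * The percpu partial list is detached under the cpu_slab local lock in
 * put_partials() and only then walked by __put_partials(), which takes
 * the node list_lock(s), so the two locks are never held together on
 * this path.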
3002 */ 3003 static void put_partials(struct kmem_cache *s) 3004 { 3005 struct slab *partial_slab; 3006 unsigned long flags; 3007 3008 local_lock_irqsave(&s->cpu_slab->lock, flags); 3009 partial_slab = this_cpu_read(s->cpu_slab->partial); 3010 this_cpu_write(s->cpu_slab->partial, NULL); 3011 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3012 3013 if (partial_slab) 3014 __put_partials(s, partial_slab); 3015 } 3016 3017 static void put_partials_cpu(struct kmem_cache *s, 3018 struct kmem_cache_cpu *c) 3019 { 3020 struct slab *partial_slab; 3021 3022 partial_slab = slub_percpu_partial(c); 3023 c->partial = NULL; 3024 3025 if (partial_slab) 3026 __put_partials(s, partial_slab); 3027 } 3028 3029 /* 3030 * Put a slab into a partial slab slot if available. 3031 * 3032 * If we did not find a slot then simply move all the partials to the 3033 * per node partial list. 3034 */ 3035 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 3036 { 3037 struct slab *oldslab; 3038 struct slab *slab_to_put = NULL; 3039 unsigned long flags; 3040 int slabs = 0; 3041 3042 local_lock_irqsave(&s->cpu_slab->lock, flags); 3043 3044 oldslab = this_cpu_read(s->cpu_slab->partial); 3045 3046 if (oldslab) { 3047 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 3048 /* 3049 * Partial array is full. Move the existing set to the 3050 * per node partial list. Postpone the actual unfreezing 3051 * outside of the critical section. 3052 */ 3053 slab_to_put = oldslab; 3054 oldslab = NULL; 3055 } else { 3056 slabs = oldslab->slabs; 3057 } 3058 } 3059 3060 slabs++; 3061 3062 slab->slabs = slabs; 3063 slab->next = oldslab; 3064 3065 this_cpu_write(s->cpu_slab->partial, slab); 3066 3067 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3068 3069 if (slab_to_put) { 3070 __put_partials(s, slab_to_put); 3071 stat(s, CPU_PARTIAL_DRAIN); 3072 } 3073 } 3074 3075 #else /* CONFIG_SLUB_CPU_PARTIAL */ 3076 3077 static inline void put_partials(struct kmem_cache *s) { } 3078 static inline void put_partials_cpu(struct kmem_cache *s, 3079 struct kmem_cache_cpu *c) { } 3080 3081 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 3082 3083 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 3084 { 3085 unsigned long flags; 3086 struct slab *slab; 3087 void *freelist; 3088 3089 local_lock_irqsave(&s->cpu_slab->lock, flags); 3090 3091 slab = c->slab; 3092 freelist = c->freelist; 3093 3094 c->slab = NULL; 3095 c->freelist = NULL; 3096 c->tid = next_tid(c->tid); 3097 3098 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3099 3100 if (slab) { 3101 deactivate_slab(s, slab, freelist); 3102 stat(s, CPUSLAB_FLUSH); 3103 } 3104 } 3105 3106 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 3107 { 3108 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3109 void *freelist = c->freelist; 3110 struct slab *slab = c->slab; 3111 3112 c->slab = NULL; 3113 c->freelist = NULL; 3114 c->tid = next_tid(c->tid); 3115 3116 if (slab) { 3117 deactivate_slab(s, slab, freelist); 3118 stat(s, CPUSLAB_FLUSH); 3119 } 3120 3121 put_partials_cpu(s, c); 3122 } 3123 3124 struct slub_flush_work { 3125 struct work_struct work; 3126 struct kmem_cache *s; 3127 bool skip; 3128 }; 3129 3130 /* 3131 * Flush cpu slab. 3132 * 3133 * Called from CPU work handler with migration disabled. 
3134 */ 3135 static void flush_cpu_slab(struct work_struct *w) 3136 { 3137 struct kmem_cache *s; 3138 struct kmem_cache_cpu *c; 3139 struct slub_flush_work *sfw; 3140 3141 sfw = container_of(w, struct slub_flush_work, work); 3142 3143 s = sfw->s; 3144 c = this_cpu_ptr(s->cpu_slab); 3145 3146 if (c->slab) 3147 flush_slab(s, c); 3148 3149 put_partials(s); 3150 } 3151 3152 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 3153 { 3154 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3155 3156 return c->slab || slub_percpu_partial(c); 3157 } 3158 3159 static DEFINE_MUTEX(flush_lock); 3160 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush); 3161 3162 static void flush_all_cpus_locked(struct kmem_cache *s) 3163 { 3164 struct slub_flush_work *sfw; 3165 unsigned int cpu; 3166 3167 lockdep_assert_cpus_held(); 3168 mutex_lock(&flush_lock); 3169 3170 for_each_online_cpu(cpu) { 3171 sfw = &per_cpu(slub_flush, cpu); 3172 if (!has_cpu_slab(cpu, s)) { 3173 sfw->skip = true; 3174 continue; 3175 } 3176 INIT_WORK(&sfw->work, flush_cpu_slab); 3177 sfw->skip = false; 3178 sfw->s = s; 3179 queue_work_on(cpu, flushwq, &sfw->work); 3180 } 3181 3182 for_each_online_cpu(cpu) { 3183 sfw = &per_cpu(slub_flush, cpu); 3184 if (sfw->skip) 3185 continue; 3186 flush_work(&sfw->work); 3187 } 3188 3189 mutex_unlock(&flush_lock); 3190 } 3191 3192 static void flush_all(struct kmem_cache *s) 3193 { 3194 cpus_read_lock(); 3195 flush_all_cpus_locked(s); 3196 cpus_read_unlock(); 3197 } 3198 3199 /* 3200 * Use the cpu notifier to ensure that the cpu slabs are flushed when 3201 * necessary. 3202 */ 3203 static int slub_cpu_dead(unsigned int cpu) 3204 { 3205 struct kmem_cache *s; 3206 3207 mutex_lock(&slab_mutex); 3208 list_for_each_entry(s, &slab_caches, list) 3209 __flush_cpu_slab(s, cpu); 3210 mutex_unlock(&slab_mutex); 3211 return 0; 3212 } 3213 3214 #else /* CONFIG_SLUB_TINY */ 3215 static inline void flush_all_cpus_locked(struct kmem_cache *s) { } 3216 static inline void flush_all(struct kmem_cache *s) { } 3217 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } 3218 static inline int slub_cpu_dead(unsigned int cpu) { return 0; } 3219 #endif /* CONFIG_SLUB_TINY */ 3220 3221 /* 3222 * Check if the objects in a per cpu structure fit numa 3223 * locality expectations.
3224 */ 3225 static inline int node_match(struct slab *slab, int node) 3226 { 3227 #ifdef CONFIG_NUMA 3228 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 3229 return 0; 3230 #endif 3231 return 1; 3232 } 3233 3234 #ifdef CONFIG_SLUB_DEBUG 3235 static int count_free(struct slab *slab) 3236 { 3237 return slab->objects - slab->inuse; 3238 } 3239 3240 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 3241 { 3242 return atomic_long_read(&n->total_objects); 3243 } 3244 3245 /* Supports checking bulk free of a constructed freelist */ 3246 static inline bool free_debug_processing(struct kmem_cache *s, 3247 struct slab *slab, void *head, void *tail, int *bulk_cnt, 3248 unsigned long addr, depot_stack_handle_t handle) 3249 { 3250 bool checks_ok = false; 3251 void *object = head; 3252 int cnt = 0; 3253 3254 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3255 if (!check_slab(s, slab)) 3256 goto out; 3257 } 3258 3259 if (slab->inuse < *bulk_cnt) { 3260 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", 3261 slab->inuse, *bulk_cnt); 3262 goto out; 3263 } 3264 3265 next_object: 3266 3267 if (++cnt > *bulk_cnt) 3268 goto out_cnt; 3269 3270 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3271 if (!free_consistency_checks(s, slab, object, addr)) 3272 goto out; 3273 } 3274 3275 if (s->flags & SLAB_STORE_USER) 3276 set_track_update(s, object, TRACK_FREE, addr, handle); 3277 trace(s, slab, object, 0); 3278 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 3279 init_object(s, object, SLUB_RED_INACTIVE); 3280 3281 /* Reached end of constructed freelist yet? */ 3282 if (object != tail) { 3283 object = get_freepointer(s, object); 3284 goto next_object; 3285 } 3286 checks_ok = true; 3287 3288 out_cnt: 3289 if (cnt != *bulk_cnt) { 3290 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", 3291 *bulk_cnt, cnt); 3292 *bulk_cnt = cnt; 3293 } 3294 3295 out: 3296 3297 if (!checks_ok) 3298 slab_fix(s, "Object at 0x%p not freed", object); 3299 3300 return checks_ok; 3301 } 3302 #endif /* CONFIG_SLUB_DEBUG */ 3303 3304 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) 3305 static unsigned long count_partial(struct kmem_cache_node *n, 3306 int (*get_count)(struct slab *)) 3307 { 3308 unsigned long flags; 3309 unsigned long x = 0; 3310 struct slab *slab; 3311 3312 spin_lock_irqsave(&n->list_lock, flags); 3313 list_for_each_entry(slab, &n->partial, slab_list) 3314 x += get_count(slab); 3315 spin_unlock_irqrestore(&n->list_lock, flags); 3316 return x; 3317 } 3318 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ 3319 3320 #ifdef CONFIG_SLUB_DEBUG 3321 #define MAX_PARTIAL_TO_SCAN 10000 3322 3323 static unsigned long count_partial_free_approx(struct kmem_cache_node *n) 3324 { 3325 unsigned long flags; 3326 unsigned long x = 0; 3327 struct slab *slab; 3328 3329 spin_lock_irqsave(&n->list_lock, flags); 3330 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) { 3331 list_for_each_entry(slab, &n->partial, slab_list) 3332 x += slab->objects - slab->inuse; 3333 } else { 3334 /* 3335 * For a long list, approximate the total count of objects in 3336 * it to meet the limit on the number of slabs to scan. 3337 * Scan from both the list's head and tail for better accuracy. 
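 * Worked example with made-up numbers: if nr_partial == 40000 and the
 * 10000 scanned slabs held 50000 free objects, the extrapolation below
 * yields 50000 * 40000 / 10000 == 200000, which is then clamped to
 * node_nr_objs(n).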
3338 */ 3339 unsigned long scanned = 0; 3340 3341 list_for_each_entry(slab, &n->partial, slab_list) { 3342 x += slab->objects - slab->inuse; 3343 if (++scanned == MAX_PARTIAL_TO_SCAN / 2) 3344 break; 3345 } 3346 list_for_each_entry_reverse(slab, &n->partial, slab_list) { 3347 x += slab->objects - slab->inuse; 3348 if (++scanned == MAX_PARTIAL_TO_SCAN) 3349 break; 3350 } 3351 x = mult_frac(x, n->nr_partial, scanned); 3352 x = min(x, node_nr_objs(n)); 3353 } 3354 spin_unlock_irqrestore(&n->list_lock, flags); 3355 return x; 3356 } 3357 3358 static noinline void 3359 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 3360 { 3361 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 3362 DEFAULT_RATELIMIT_BURST); 3363 int node; 3364 struct kmem_cache_node *n; 3365 3366 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 3367 return; 3368 3369 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 3370 nid, gfpflags, &gfpflags); 3371 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 3372 s->name, s->object_size, s->size, oo_order(s->oo), 3373 oo_order(s->min)); 3374 3375 if (oo_order(s->min) > get_order(s->object_size)) 3376 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n", 3377 s->name); 3378 3379 for_each_kmem_cache_node(s, node, n) { 3380 unsigned long nr_slabs; 3381 unsigned long nr_objs; 3382 unsigned long nr_free; 3383 3384 nr_free = count_partial_free_approx(n); 3385 nr_slabs = node_nr_slabs(n); 3386 nr_objs = node_nr_objs(n); 3387 3388 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 3389 node, nr_slabs, nr_objs, nr_free); 3390 } 3391 } 3392 #else /* CONFIG_SLUB_DEBUG */ 3393 static inline void 3394 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } 3395 #endif 3396 3397 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) 3398 { 3399 if (unlikely(slab_test_pfmemalloc(slab))) 3400 return gfp_pfmemalloc_allowed(gfpflags); 3401 3402 return true; 3403 } 3404 3405 #ifndef CONFIG_SLUB_TINY 3406 static inline bool 3407 __update_cpu_freelist_fast(struct kmem_cache *s, 3408 void *freelist_old, void *freelist_new, 3409 unsigned long tid) 3410 { 3411 freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; 3412 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; 3413 3414 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, 3415 &old.full, new.full); 3416 } 3417 3418 /* 3419 * Check the slab->freelist and either transfer the freelist to the 3420 * per cpu freelist or deactivate the slab. 3421 * 3422 * The slab is still frozen if the return value is not NULL. 3423 * 3424 * If this function returns NULL then the slab has been unfrozen. 3425 */ 3426 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 3427 { 3428 struct slab new; 3429 unsigned long counters; 3430 void *freelist; 3431 3432 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3433 3434 do { 3435 freelist = slab->freelist; 3436 counters = slab->counters; 3437 3438 new.counters = counters; 3439 3440 new.inuse = slab->objects; 3441 new.frozen = freelist != NULL; 3442 3443 } while (!__slab_update_freelist(s, slab, 3444 freelist, counters, 3445 NULL, new.counters, 3446 "get_freelist")); 3447 3448 return freelist; 3449 } 3450 3451 /* 3452 * Freeze the partial slab and return the pointer to the freelist. 
3453 */ 3454 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) 3455 { 3456 struct slab new; 3457 unsigned long counters; 3458 void *freelist; 3459 3460 do { 3461 freelist = slab->freelist; 3462 counters = slab->counters; 3463 3464 new.counters = counters; 3465 VM_BUG_ON(new.frozen); 3466 3467 new.inuse = slab->objects; 3468 new.frozen = 1; 3469 3470 } while (!slab_update_freelist(s, slab, 3471 freelist, counters, 3472 NULL, new.counters, 3473 "freeze_slab")); 3474 3475 return freelist; 3476 } 3477 3478 /* 3479 * Slow path. The lockless freelist is empty or we need to perform 3480 * debugging duties. 3481 * 3482 * Processing is still very fast if new objects have been freed to the 3483 * regular freelist. In that case we simply take over the regular freelist 3484 * as the lockless freelist and zap the regular freelist. 3485 * 3486 * If that is not working then we fall back to the partial lists. We take the 3487 * first element of the freelist as the object to allocate now and move the 3488 * rest of the freelist to the lockless freelist. 3489 * 3490 * And if we were unable to get a new slab from the partial slab lists then 3491 * we need to allocate a new slab. This is the slowest path since it involves 3492 * a call to the page allocator and the setup of a new slab. 3493 * 3494 * Version of __slab_alloc to use when we know that preemption is 3495 * already disabled (which is the case for bulk allocation). 3496 */ 3497 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3498 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3499 { 3500 void *freelist; 3501 struct slab *slab; 3502 unsigned long flags; 3503 struct partial_context pc; 3504 bool try_thisnode = true; 3505 3506 stat(s, ALLOC_SLOWPATH); 3507 3508 reread_slab: 3509 3510 slab = READ_ONCE(c->slab); 3511 if (!slab) { 3512 /* 3513 * if the node is not online or has no normal memory, just 3514 * ignore the node constraint 3515 */ 3516 if (unlikely(node != NUMA_NO_NODE && 3517 !node_isset(node, slab_nodes))) 3518 node = NUMA_NO_NODE; 3519 goto new_slab; 3520 } 3521 3522 if (unlikely(!node_match(slab, node))) { 3523 /* 3524 * same as above but node_match() being false already 3525 * implies node != NUMA_NO_NODE 3526 */ 3527 if (!node_isset(node, slab_nodes)) { 3528 node = NUMA_NO_NODE; 3529 } else { 3530 stat(s, ALLOC_NODE_MISMATCH); 3531 goto deactivate_slab; 3532 } 3533 } 3534 3535 /* 3536 * By rights, we should be searching for a slab page that was 3537 * PFMEMALLOC but right now, we are losing the pfmemalloc 3538 * information when the page leaves the per-cpu allocator 3539 */ 3540 if (unlikely(!pfmemalloc_match(slab, gfpflags))) 3541 goto deactivate_slab; 3542 3543 /* must check again c->slab in case we got preempted and it changed */ 3544 local_lock_irqsave(&s->cpu_slab->lock, flags); 3545 if (unlikely(slab != c->slab)) { 3546 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3547 goto reread_slab; 3548 } 3549 freelist = c->freelist; 3550 if (freelist) 3551 goto load_freelist; 3552 3553 freelist = get_freelist(s, slab); 3554 3555 if (!freelist) { 3556 c->slab = NULL; 3557 c->tid = next_tid(c->tid); 3558 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3559 stat(s, DEACTIVATE_BYPASS); 3560 goto new_slab; 3561 } 3562 3563 stat(s, ALLOC_REFILL); 3564 3565 load_freelist: 3566 3567 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3568 3569 /* 3570 * freelist is pointing to the list of objects to be used. 
3571 * slab is pointing to the slab from which the objects are obtained. 3572 * That slab must be frozen for per cpu allocations to work. 3573 */ 3574 VM_BUG_ON(!c->slab->frozen); 3575 c->freelist = get_freepointer(s, freelist); 3576 c->tid = next_tid(c->tid); 3577 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3578 return freelist; 3579 3580 deactivate_slab: 3581 3582 local_lock_irqsave(&s->cpu_slab->lock, flags); 3583 if (slab != c->slab) { 3584 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3585 goto reread_slab; 3586 } 3587 freelist = c->freelist; 3588 c->slab = NULL; 3589 c->freelist = NULL; 3590 c->tid = next_tid(c->tid); 3591 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3592 deactivate_slab(s, slab, freelist); 3593 3594 new_slab: 3595 3596 #ifdef CONFIG_SLUB_CPU_PARTIAL 3597 while (slub_percpu_partial(c)) { 3598 local_lock_irqsave(&s->cpu_slab->lock, flags); 3599 if (unlikely(c->slab)) { 3600 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3601 goto reread_slab; 3602 } 3603 if (unlikely(!slub_percpu_partial(c))) { 3604 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3605 /* we were preempted and partial list got empty */ 3606 goto new_objects; 3607 } 3608 3609 slab = slub_percpu_partial(c); 3610 slub_set_percpu_partial(c, slab); 3611 3612 if (likely(node_match(slab, node) && 3613 pfmemalloc_match(slab, gfpflags))) { 3614 c->slab = slab; 3615 freelist = get_freelist(s, slab); 3616 VM_BUG_ON(!freelist); 3617 stat(s, CPU_PARTIAL_ALLOC); 3618 goto load_freelist; 3619 } 3620 3621 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3622 3623 slab->next = NULL; 3624 __put_partials(s, slab); 3625 } 3626 #endif 3627 3628 new_objects: 3629 3630 pc.flags = gfpflags; 3631 /* 3632 * When a preferred node is indicated but __GFP_THISNODE is not set: 3633 * 3634 * 1) try to get a partial slab from the target node only by having 3635 * __GFP_THISNODE in pc.flags for get_partial() 3636 * 2) if 1) failed, try to allocate a new slab from the target node with 3637 * GFP_NOWAIT | __GFP_THISNODE opportunistically 3638 * 3) if 2) failed, retry with original gfpflags which will allow 3639 * get_partial() to try partial lists of other nodes before potentially 3640 * allocating a new page from other nodes 3641 */ 3642 if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 3643 && try_thisnode)) 3644 pc.flags = GFP_NOWAIT | __GFP_THISNODE; 3645 3646 pc.orig_size = orig_size; 3647 slab = get_partial(s, node, &pc); 3648 if (slab) { 3649 if (kmem_cache_debug(s)) { 3650 freelist = pc.object; 3651 /* 3652 * For debug caches here we had to go through 3653 * alloc_single_from_partial() so just store the 3654 * tracking info and return the object.
3655 */ 3656 if (s->flags & SLAB_STORE_USER) 3657 set_track(s, freelist, TRACK_ALLOC, addr); 3658 3659 return freelist; 3660 } 3661 3662 freelist = freeze_slab(s, slab); 3663 goto retry_load_slab; 3664 } 3665 3666 slub_put_cpu_ptr(s->cpu_slab); 3667 slab = new_slab(s, pc.flags, node); 3668 c = slub_get_cpu_ptr(s->cpu_slab); 3669 3670 if (unlikely(!slab)) { 3671 if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 3672 && try_thisnode) { 3673 try_thisnode = false; 3674 goto new_objects; 3675 } 3676 slab_out_of_memory(s, gfpflags, node); 3677 return NULL; 3678 } 3679 3680 stat(s, ALLOC_SLAB); 3681 3682 if (kmem_cache_debug(s)) { 3683 freelist = alloc_single_from_new_slab(s, slab, orig_size); 3684 3685 if (unlikely(!freelist)) 3686 goto new_objects; 3687 3688 if (s->flags & SLAB_STORE_USER) 3689 set_track(s, freelist, TRACK_ALLOC, addr); 3690 3691 return freelist; 3692 } 3693 3694 /* 3695 * No other reference to the slab yet so we can 3696 * muck around with it freely without cmpxchg 3697 */ 3698 freelist = slab->freelist; 3699 slab->freelist = NULL; 3700 slab->inuse = slab->objects; 3701 slab->frozen = 1; 3702 3703 inc_slabs_node(s, slab_nid(slab), slab->objects); 3704 3705 if (unlikely(!pfmemalloc_match(slab, gfpflags))) { 3706 /* 3707 * For !pfmemalloc_match() case we don't load freelist so that 3708 * we don't make further mismatched allocations easier. 3709 */ 3710 deactivate_slab(s, slab, get_freepointer(s, freelist)); 3711 return freelist; 3712 } 3713 3714 retry_load_slab: 3715 3716 local_lock_irqsave(&s->cpu_slab->lock, flags); 3717 if (unlikely(c->slab)) { 3718 void *flush_freelist = c->freelist; 3719 struct slab *flush_slab = c->slab; 3720 3721 c->slab = NULL; 3722 c->freelist = NULL; 3723 c->tid = next_tid(c->tid); 3724 3725 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3726 3727 deactivate_slab(s, flush_slab, flush_freelist); 3728 3729 stat(s, CPUSLAB_FLUSH); 3730 3731 goto retry_load_slab; 3732 } 3733 c->slab = slab; 3734 3735 goto load_freelist; 3736 } 3737 3738 /* 3739 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 3740 * disabled. Compensates for possible cpu changes by refetching the per cpu area 3741 * pointer. 3742 */ 3743 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3744 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3745 { 3746 void *p; 3747 3748 #ifdef CONFIG_PREEMPT_COUNT 3749 /* 3750 * We may have been preempted and rescheduled on a different 3751 * cpu before disabling preemption. Need to reload cpu area 3752 * pointer. 3753 */ 3754 c = slub_get_cpu_ptr(s->cpu_slab); 3755 #endif 3756 3757 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); 3758 #ifdef CONFIG_PREEMPT_COUNT 3759 slub_put_cpu_ptr(s->cpu_slab); 3760 #endif 3761 return p; 3762 } 3763 3764 static __always_inline void *__slab_alloc_node(struct kmem_cache *s, 3765 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3766 { 3767 struct kmem_cache_cpu *c; 3768 struct slab *slab; 3769 unsigned long tid; 3770 void *object; 3771 3772 redo: 3773 /* 3774 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 3775 * enabled. We may switch back and forth between cpus while 3776 * reading from one cpu area. That does not matter as long 3777 * as we end up on the original cpu again when doing the cmpxchg. 3778 * 3779 * We must guarantee that tid and kmem_cache_cpu are retrieved on the 3780 * same cpu. We read first the kmem_cache_cpu pointer and use it to read 3781 * the tid. 
If we are preempted and switched to another cpu between the 3782 * two reads, it's OK as the two are still associated with the same cpu 3783 * and cmpxchg later will validate the cpu. 3784 */ 3785 c = raw_cpu_ptr(s->cpu_slab); 3786 tid = READ_ONCE(c->tid); 3787 3788 /* 3789 * Irqless object alloc/free algorithm used here depends on sequence 3790 * of fetching cpu_slab's data. tid should be fetched before anything 3791 * on c to guarantee that object and slab associated with previous tid 3792 * won't be used with current tid. If we fetch tid first, object and 3793 * slab could be the ones associated with the next tid and our alloc/free 3794 * request will fail. In this case, we will retry. So, no problem. 3795 */ 3796 barrier(); 3797 3798 /* 3799 * The transaction ids are globally unique per cpu and per operation on 3800 * a per cpu queue. Thus they guarantee that the cmpxchg_double 3801 * occurs on the right processor and that there was no operation on the 3802 * linked list in between. 3803 */ 3804 3805 object = c->freelist; 3806 slab = c->slab; 3807 3808 if (!USE_LOCKLESS_FAST_PATH() || 3809 unlikely(!object || !slab || !node_match(slab, node))) { 3810 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); 3811 } else { 3812 void *next_object = get_freepointer_safe(s, object); 3813 3814 /* 3815 * The cmpxchg will only match if there was no additional 3816 * operation and if we are on the right processor. 3817 * 3818 * The cmpxchg does the following atomically (without lock 3819 * semantics!) 3820 * 1. Relocate first pointer to the current per cpu area. 3821 * 2. Verify that tid and freelist have not been changed 3822 * 3. If they were not changed replace tid and freelist 3823 * 3824 * Since this is without lock semantics the protection is only 3825 * against code executing on this cpu *not* from access by 3826 * other cpus. 3827 */ 3828 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { 3829 note_cmpxchg_failure("slab_alloc", s, tid); 3830 goto redo; 3831 } 3832 prefetch_freepointer(s, next_object); 3833 stat(s, ALLOC_FASTPATH); 3834 } 3835 3836 return object; 3837 } 3838 #else /* CONFIG_SLUB_TINY */ 3839 static void *__slab_alloc_node(struct kmem_cache *s, 3840 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3841 { 3842 struct partial_context pc; 3843 struct slab *slab; 3844 void *object; 3845 3846 pc.flags = gfpflags; 3847 pc.orig_size = orig_size; 3848 slab = get_partial(s, node, &pc); 3849 3850 if (slab) 3851 return pc.object; 3852 3853 slab = new_slab(s, gfpflags, node); 3854 if (unlikely(!slab)) { 3855 slab_out_of_memory(s, gfpflags, node); 3856 return NULL; 3857 } 3858 3859 object = alloc_single_from_new_slab(s, slab, orig_size); 3860 3861 return object; 3862 } 3863 #endif /* CONFIG_SLUB_TINY */ 3864 3865 /* 3866 * If the object has been wiped upon free, make sure it's fully initialized by 3867 * zeroing out freelist pointer.
3868 */ 3869 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 3870 void *obj) 3871 { 3872 if (unlikely(slab_want_init_on_free(s)) && obj && 3873 !freeptr_outside_object(s)) 3874 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 3875 0, sizeof(void *)); 3876 } 3877 3878 noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags) 3879 { 3880 if (__should_failslab(s, gfpflags)) 3881 return -ENOMEM; 3882 return 0; 3883 } 3884 ALLOW_ERROR_INJECTION(should_failslab, ERRNO); 3885 3886 static __fastpath_inline 3887 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 3888 { 3889 flags &= gfp_allowed_mask; 3890 3891 might_alloc(flags); 3892 3893 if (unlikely(should_failslab(s, flags))) 3894 return NULL; 3895 3896 return s; 3897 } 3898 3899 static __fastpath_inline 3900 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 3901 gfp_t flags, size_t size, void **p, bool init, 3902 unsigned int orig_size) 3903 { 3904 unsigned int zero_size = s->object_size; 3905 struct slabobj_ext *obj_exts; 3906 bool kasan_init = init; 3907 size_t i; 3908 gfp_t init_flags = flags & gfp_allowed_mask; 3909 3910 /* 3911 * For kmalloc object, the allocated memory size(object_size) is likely 3912 * larger than the requested size(orig_size). If redzone check is 3913 * enabled for the extra space, don't zero it, as it will be redzoned 3914 * soon. The redzone operation for this extra space could be seen as a 3915 * replacement of current poisoning under certain debug option, and 3916 * won't break other sanity checks. 3917 */ 3918 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) && 3919 (s->flags & SLAB_KMALLOC)) 3920 zero_size = orig_size; 3921 3922 /* 3923 * When slab_debug is enabled, avoid memory initialization integrated 3924 * into KASAN and instead zero out the memory via the memset below with 3925 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and 3926 * cause false-positive reports. This does not lead to a performance 3927 * penalty on production builds, as slab_debug is not intended to be 3928 * enabled there. 3929 */ 3930 if (__slub_debug_enabled()) 3931 kasan_init = false; 3932 3933 /* 3934 * As memory initialization might be integrated into KASAN, 3935 * kasan_slab_alloc and initialization memset must be 3936 * kept together to avoid discrepancies in behavior. 3937 * 3938 * As p[i] might get tagged, memset and kmemleak hook come after KASAN. 3939 */ 3940 for (i = 0; i < size; i++) { 3941 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init); 3942 if (p[i] && init && (!kasan_init || 3943 !kasan_has_integrated_init())) 3944 memset(p[i], 0, zero_size); 3945 kmemleak_alloc_recursive(p[i], s->object_size, 1, 3946 s->flags, init_flags); 3947 kmsan_slab_alloc(s, p[i], init_flags); 3948 if (need_slab_obj_ext()) { 3949 obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]); 3950 #ifdef CONFIG_MEM_ALLOC_PROFILING 3951 /* 3952 * Currently obj_exts is used only for allocation profiling. 3953 * If other users appear then mem_alloc_profiling_enabled() 3954 * check should be added before alloc_tag_add(). 3955 */ 3956 if (likely(obj_exts)) 3957 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); 3958 #endif 3959 } 3960 } 3961 3962 return memcg_slab_post_alloc_hook(s, lru, flags, size, p); 3963 } 3964 3965 /* 3966 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 3967 * have the fastpath folded into their functions. 
So no function call 3968 * overhead for requests that can be satisfied on the fastpath. 3969 * 3970 * The fastpath works by first checking if the lockless freelist can be used. 3971 * If not then __slab_alloc is called for slow processing. 3972 * 3973 * Otherwise we can simply pick the next object from the lockless free list. 3974 */ 3975 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, 3976 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3977 { 3978 void *object; 3979 bool init = false; 3980 3981 s = slab_pre_alloc_hook(s, gfpflags); 3982 if (unlikely(!s)) 3983 return NULL; 3984 3985 object = kfence_alloc(s, orig_size, gfpflags); 3986 if (unlikely(object)) 3987 goto out; 3988 3989 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); 3990 3991 maybe_wipe_obj_freeptr(s, object); 3992 init = slab_want_init_on_alloc(gfpflags, s); 3993 3994 out: 3995 /* 3996 * When init equals 'true', like for kzalloc() family, only 3997 * @orig_size bytes might be zeroed instead of s->object_size 3998 * In case this fails due to memcg_slab_post_alloc_hook(), 3999 * object is set to NULL 4000 */ 4001 slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size); 4002 4003 return object; 4004 } 4005 4006 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags) 4007 { 4008 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, 4009 s->object_size); 4010 4011 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4012 4013 return ret; 4014 } 4015 EXPORT_SYMBOL(kmem_cache_alloc_noprof); 4016 4017 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, 4018 gfp_t gfpflags) 4019 { 4020 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, 4021 s->object_size); 4022 4023 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4024 4025 return ret; 4026 } 4027 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof); 4028 4029 /** 4030 * kmem_cache_alloc_node - Allocate an object on the specified node 4031 * @s: The cache to allocate from. 4032 * @gfpflags: See kmalloc(). 4033 * @node: node number of the target node. 4034 * 4035 * Identical to kmem_cache_alloc but it will allocate memory on the given 4036 * node, which can improve the performance for cpu bound structures. 4037 * 4038 * Fallback to other node is possible if __GFP_THISNODE is not set. 4039 * 4040 * Return: pointer to the new object or %NULL in case of error 4041 */ 4042 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node) 4043 { 4044 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); 4045 4046 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); 4047 4048 return ret; 4049 } 4050 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof); 4051 4052 /* 4053 * To avoid unnecessary overhead, we pass through large allocation requests 4054 * directly to the page allocator. We use __GFP_COMP, because we will need to 4055 * know the allocation order to free the pages properly in kfree. 
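 *
 * Illustrative example (assuming 4K pages and kmalloc caches capped at
 * two pages): kmalloc(16384, GFP_KERNEL) takes this path and is backed
 * by an order-2 compound page; kfree() later recovers the order from
 * the folio so all four pages are returned at once.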
4056 */ 4057 static void *__kmalloc_large_node(size_t size, gfp_t flags, int node) 4058 { 4059 struct folio *folio; 4060 void *ptr = NULL; 4061 unsigned int order = get_order(size); 4062 4063 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 4064 flags = kmalloc_fix_flags(flags); 4065 4066 flags |= __GFP_COMP; 4067 folio = (struct folio *)alloc_pages_node_noprof(node, flags, order); 4068 if (folio) { 4069 ptr = folio_address(folio); 4070 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4071 PAGE_SIZE << order); 4072 } 4073 4074 ptr = kasan_kmalloc_large(ptr, size, flags); 4075 /* As ptr might get tagged, call kmemleak hook after KASAN. */ 4076 kmemleak_alloc(ptr, size, 1, flags); 4077 kmsan_kmalloc_large(ptr, size, flags); 4078 4079 return ptr; 4080 } 4081 4082 void *kmalloc_large_noprof(size_t size, gfp_t flags) 4083 { 4084 void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE); 4085 4086 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4087 flags, NUMA_NO_NODE); 4088 return ret; 4089 } 4090 EXPORT_SYMBOL(kmalloc_large_noprof); 4091 4092 void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) 4093 { 4094 void *ret = __kmalloc_large_node(size, flags, node); 4095 4096 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4097 flags, node); 4098 return ret; 4099 } 4100 EXPORT_SYMBOL(kmalloc_large_node_noprof); 4101 4102 static __always_inline 4103 void *__do_kmalloc_node(size_t size, gfp_t flags, int node, 4104 unsigned long caller) 4105 { 4106 struct kmem_cache *s; 4107 void *ret; 4108 4109 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4110 ret = __kmalloc_large_node(size, flags, node); 4111 trace_kmalloc(caller, ret, size, 4112 PAGE_SIZE << get_order(size), flags, node); 4113 return ret; 4114 } 4115 4116 if (unlikely(!size)) 4117 return ZERO_SIZE_PTR; 4118 4119 s = kmalloc_slab(size, flags, caller); 4120 4121 ret = slab_alloc_node(s, NULL, flags, node, caller, size); 4122 ret = kasan_kmalloc(s, ret, size, flags); 4123 trace_kmalloc(caller, ret, size, s->size, flags, node); 4124 return ret; 4125 } 4126 4127 void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) 4128 { 4129 return __do_kmalloc_node(size, flags, node, _RET_IP_); 4130 } 4131 EXPORT_SYMBOL(__kmalloc_node_noprof); 4132 4133 void *__kmalloc_noprof(size_t size, gfp_t flags) 4134 { 4135 return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_); 4136 } 4137 EXPORT_SYMBOL(__kmalloc_noprof); 4138 4139 void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, 4140 int node, unsigned long caller) 4141 { 4142 return __do_kmalloc_node(size, flags, node, caller); 4143 } 4144 EXPORT_SYMBOL(kmalloc_node_track_caller_noprof); 4145 4146 void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size) 4147 { 4148 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, 4149 _RET_IP_, size); 4150 4151 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); 4152 4153 ret = kasan_kmalloc(s, ret, size, gfpflags); 4154 return ret; 4155 } 4156 EXPORT_SYMBOL(kmalloc_trace_noprof); 4157 4158 void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, 4159 int node, size_t size) 4160 { 4161 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); 4162 4163 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); 4164 4165 ret = kasan_kmalloc(s, ret, size, gfpflags); 4166 return ret; 4167 } 4168 EXPORT_SYMBOL(kmalloc_node_trace_noprof); 4169 4170 static noinline void free_to_partial_list( 4171 struct kmem_cache *s, struct slab *slab, 4172 
void *head, void *tail, int bulk_cnt, 4173 unsigned long addr) 4174 { 4175 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 4176 struct slab *slab_free = NULL; 4177 int cnt = bulk_cnt; 4178 unsigned long flags; 4179 depot_stack_handle_t handle = 0; 4180 4181 if (s->flags & SLAB_STORE_USER) 4182 handle = set_track_prepare(); 4183 4184 spin_lock_irqsave(&n->list_lock, flags); 4185 4186 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { 4187 void *prior = slab->freelist; 4188 4189 /* Perform the actual freeing while we still hold the locks */ 4190 slab->inuse -= cnt; 4191 set_freepointer(s, tail, prior); 4192 slab->freelist = head; 4193 4194 /* 4195 * If the slab is empty, and the node's partial list is full, 4196 * it should be discarded anyway, no matter whether it's on the 4197 * full or partial list. 4198 */ 4199 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) 4200 slab_free = slab; 4201 4202 if (!prior) { 4203 /* was on full list */ 4204 remove_full(s, n, slab); 4205 if (!slab_free) { 4206 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4207 stat(s, FREE_ADD_PARTIAL); 4208 } 4209 } else if (slab_free) { 4210 remove_partial(n, slab); 4211 stat(s, FREE_REMOVE_PARTIAL); 4212 } 4213 } 4214 4215 if (slab_free) { 4216 /* 4217 * Update the counters while still holding n->list_lock to 4218 * prevent spurious validation warnings 4219 */ 4220 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); 4221 } 4222 4223 spin_unlock_irqrestore(&n->list_lock, flags); 4224 4225 if (slab_free) { 4226 stat(s, FREE_SLAB); 4227 free_slab(s, slab_free); 4228 } 4229 } 4230 4231 /* 4232 * Slow path handling. This may still be called frequently since objects 4233 * have a longer lifetime than the cpu slabs in most processing loads. 4234 * 4235 * So we still attempt to reduce cache line usage. Just take the slab 4236 * lock and free the item. If there is no additional partial slab 4237 * handling required then we can return immediately. 4238 */ 4239 static void __slab_free(struct kmem_cache *s, struct slab *slab, 4240 void *head, void *tail, int cnt, 4241 unsigned long addr) 4242 4243 { 4244 void *prior; 4245 int was_frozen; 4246 struct slab new; 4247 unsigned long counters; 4248 struct kmem_cache_node *n = NULL; 4249 unsigned long flags; 4250 bool on_node_partial; 4251 4252 stat(s, FREE_SLOWPATH); 4253 4254 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 4255 free_to_partial_list(s, slab, head, tail, cnt, addr); 4256 return; 4257 } 4258 4259 do { 4260 if (unlikely(n)) { 4261 spin_unlock_irqrestore(&n->list_lock, flags); 4262 n = NULL; 4263 } 4264 prior = slab->freelist; 4265 counters = slab->counters; 4266 set_freepointer(s, tail, prior); 4267 new.counters = counters; 4268 was_frozen = new.frozen; 4269 new.inuse -= cnt; 4270 if ((!new.inuse || !prior) && !was_frozen) { 4271 /* Needs to be taken off a list */ 4272 if (!kmem_cache_has_cpu_partial(s) || prior) { 4273 4274 n = get_node(s, slab_nid(slab)); 4275 /* 4276 * Speculatively acquire the list_lock. 4277 * If the cmpxchg does not succeed then we may 4278 * drop the list_lock without any processing. 4279 * 4280 * Otherwise the list_lock will synchronize with 4281 * other processors updating the list of slabs.
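 *
 * (Illustrative note: the enclosing do/while re-reads slab->freelist
 * and slab->counters on every pass, so a failed cmpxchg costs at most
 * one speculative lock/unlock round trip before the retry.)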
4282 */ 4283 spin_lock_irqsave(&n->list_lock, flags); 4284 4285 on_node_partial = slab_test_node_partial(slab); 4286 } 4287 } 4288 4289 } while (!slab_update_freelist(s, slab, 4290 prior, counters, 4291 head, new.counters, 4292 "__slab_free")); 4293 4294 if (likely(!n)) { 4295 4296 if (likely(was_frozen)) { 4297 /* 4298 * The list lock was not taken therefore no list 4299 * activity can be necessary. 4300 */ 4301 stat(s, FREE_FROZEN); 4302 } else if (kmem_cache_has_cpu_partial(s) && !prior) { 4303 /* 4304 * If we started with a full slab then put it onto the 4305 * per cpu partial list. 4306 */ 4307 put_cpu_partial(s, slab, 1); 4308 stat(s, CPU_PARTIAL_FREE); 4309 } 4310 4311 return; 4312 } 4313 4314 /* 4315 * This slab was partially empty but not on the per-node partial list, 4316 * in which case we shouldn't manipulate its list, just return. 4317 */ 4318 if (prior && !on_node_partial) { 4319 spin_unlock_irqrestore(&n->list_lock, flags); 4320 return; 4321 } 4322 4323 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 4324 goto slab_empty; 4325 4326 /* 4327 * Objects left in the slab. If it was not on the partial list before 4328 * then add it. 4329 */ 4330 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 4331 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4332 stat(s, FREE_ADD_PARTIAL); 4333 } 4334 spin_unlock_irqrestore(&n->list_lock, flags); 4335 return; 4336 4337 slab_empty: 4338 if (prior) { 4339 /* 4340 * Slab on the partial list. 4341 */ 4342 remove_partial(n, slab); 4343 stat(s, FREE_REMOVE_PARTIAL); 4344 } 4345 4346 spin_unlock_irqrestore(&n->list_lock, flags); 4347 stat(s, FREE_SLAB); 4348 discard_slab(s, slab); 4349 } 4350 4351 #ifndef CONFIG_SLUB_TINY 4352 /* 4353 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 4354 * can perform fastpath freeing without additional function calls. 4355 * 4356 * The fastpath is only possible if we are freeing to the current cpu slab 4357 * of this processor. This is typically the case if we have just allocated 4358 * the item before. 4359 * 4360 * If fastpath is not possible then fall back to __slab_free where we deal 4361 * with all sorts of special processing. 4362 * 4363 * Bulk free of a freelist with several objects (all pointing to the 4364 * same slab) is possible by specifying head and tail pointers, plus the 4365 * object count (cnt). A bulk free is indicated by the tail pointer being set. 4366 */ 4367 static __always_inline void do_slab_free(struct kmem_cache *s, 4368 struct slab *slab, void *head, void *tail, 4369 int cnt, unsigned long addr) 4370 { 4371 struct kmem_cache_cpu *c; 4372 unsigned long tid; 4373 void **freelist; 4374 4375 redo: 4376 /* 4377 * Determine the current cpu's per cpu slab. 4378 * The cpu may change afterward. However, that does not matter since 4379 * data is retrieved via this pointer. If we are on the same cpu 4380 * during the cmpxchg then the free will succeed.
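 *
 * A minimal sketch of the lockless case below (illustrative only,
 * assuming USE_LOCKLESS_FAST_PATH() and no migration):
 *
 *	tid = READ_ONCE(c->tid);
 *	freelist = READ_ONCE(c->freelist);
 *	set_freepointer(s, tail, freelist);
 *	if (!__update_cpu_freelist_fast(s, freelist, head, tid))
 *		goto redo;
 *
 * where a failed update means tid or freelist changed under us and the
 * whole sequence is retried.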
4381 */ 4382 c = raw_cpu_ptr(s->cpu_slab); 4383 tid = READ_ONCE(c->tid); 4384 4385 /* Same with comment on barrier() in __slab_alloc_node() */ 4386 barrier(); 4387 4388 if (unlikely(slab != c->slab)) { 4389 __slab_free(s, slab, head, tail, cnt, addr); 4390 return; 4391 } 4392 4393 if (USE_LOCKLESS_FAST_PATH()) { 4394 freelist = READ_ONCE(c->freelist); 4395 4396 set_freepointer(s, tail, freelist); 4397 4398 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { 4399 note_cmpxchg_failure("slab_free", s, tid); 4400 goto redo; 4401 } 4402 } else { 4403 /* Update the free list under the local lock */ 4404 local_lock(&s->cpu_slab->lock); 4405 c = this_cpu_ptr(s->cpu_slab); 4406 if (unlikely(slab != c->slab)) { 4407 local_unlock(&s->cpu_slab->lock); 4408 goto redo; 4409 } 4410 tid = c->tid; 4411 freelist = c->freelist; 4412 4413 set_freepointer(s, tail, freelist); 4414 c->freelist = head; 4415 c->tid = next_tid(tid); 4416 4417 local_unlock(&s->cpu_slab->lock); 4418 } 4419 stat_add(s, FREE_FASTPATH, cnt); 4420 } 4421 #else /* CONFIG_SLUB_TINY */ 4422 static void do_slab_free(struct kmem_cache *s, 4423 struct slab *slab, void *head, void *tail, 4424 int cnt, unsigned long addr) 4425 { 4426 __slab_free(s, slab, head, tail, cnt, addr); 4427 } 4428 #endif /* CONFIG_SLUB_TINY */ 4429 4430 static __fastpath_inline 4431 void slab_free(struct kmem_cache *s, struct slab *slab, void *object, 4432 unsigned long addr) 4433 { 4434 memcg_slab_free_hook(s, slab, &object, 1); 4435 alloc_tagging_slab_free_hook(s, slab, &object, 1); 4436 4437 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s)))) 4438 do_slab_free(s, slab, object, object, 1, addr); 4439 } 4440 4441 #ifdef CONFIG_MEMCG_KMEM 4442 /* Do not inline the rare memcg charging failed path into the allocation path */ 4443 static noinline 4444 void memcg_alloc_abort_single(struct kmem_cache *s, void *object) 4445 { 4446 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s)))) 4447 do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_); 4448 } 4449 #endif 4450 4451 static __fastpath_inline 4452 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, 4453 void *tail, void **p, int cnt, unsigned long addr) 4454 { 4455 memcg_slab_free_hook(s, slab, p, cnt); 4456 alloc_tagging_slab_free_hook(s, slab, p, cnt); 4457 /* 4458 * With KASAN enabled slab_free_freelist_hook modifies the freelist 4459 * to remove objects, whose reuse must be delayed. 4460 */ 4461 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) 4462 do_slab_free(s, slab, head, tail, cnt, addr); 4463 } 4464 4465 #ifdef CONFIG_KASAN_GENERIC 4466 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 4467 { 4468 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr); 4469 } 4470 #endif 4471 4472 static inline struct kmem_cache *virt_to_cache(const void *obj) 4473 { 4474 struct slab *slab; 4475 4476 slab = virt_to_slab(obj); 4477 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) 4478 return NULL; 4479 return slab->slab_cache; 4480 } 4481 4482 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) 4483 { 4484 struct kmem_cache *cachep; 4485 4486 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && 4487 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) 4488 return s; 4489 4490 cachep = virt_to_cache(x); 4491 if (WARN(cachep && cachep != s, 4492 "%s: Wrong slab cache. 
%s but object is from %s\n", 4493 __func__, s->name, cachep->name)) 4494 print_tracking(cachep, x); 4495 return cachep; 4496 } 4497 4498 /** 4499 * kmem_cache_free - Deallocate an object 4500 * @s: The cache the allocation was from. 4501 * @x: The previously allocated object. 4502 * 4503 * Free an object which was previously allocated from this 4504 * cache. 4505 */ 4506 void kmem_cache_free(struct kmem_cache *s, void *x) 4507 { 4508 s = cache_from_obj(s, x); 4509 if (!s) 4510 return; 4511 trace_kmem_cache_free(_RET_IP_, x, s); 4512 slab_free(s, virt_to_slab(x), x, _RET_IP_); 4513 } 4514 EXPORT_SYMBOL(kmem_cache_free); 4515 4516 static void free_large_kmalloc(struct folio *folio, void *object) 4517 { 4518 unsigned int order = folio_order(folio); 4519 4520 if (WARN_ON_ONCE(order == 0)) 4521 pr_warn_once("object pointer: 0x%p\n", object); 4522 4523 kmemleak_free(object); 4524 kasan_kfree_large(object); 4525 kmsan_kfree_large(object); 4526 4527 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4528 -(PAGE_SIZE << order)); 4529 folio_put(folio); 4530 } 4531 4532 /** 4533 * kfree - free previously allocated memory 4534 * @object: pointer returned by kmalloc() or kmem_cache_alloc() 4535 * 4536 * If @object is NULL, no operation is performed. 4537 */ 4538 void kfree(const void *object) 4539 { 4540 struct folio *folio; 4541 struct slab *slab; 4542 struct kmem_cache *s; 4543 void *x = (void *)object; 4544 4545 trace_kfree(_RET_IP_, object); 4546 4547 if (unlikely(ZERO_OR_NULL_PTR(object))) 4548 return; 4549 4550 folio = virt_to_folio(object); 4551 if (unlikely(!folio_test_slab(folio))) { 4552 free_large_kmalloc(folio, (void *)object); 4553 return; 4554 } 4555 4556 slab = folio_slab(folio); 4557 s = slab->slab_cache; 4558 slab_free(s, slab, x, _RET_IP_); 4559 } 4560 EXPORT_SYMBOL(kfree); 4561 4562 struct detached_freelist { 4563 struct slab *slab; 4564 void *tail; 4565 void *freelist; 4566 int cnt; 4567 struct kmem_cache *s; 4568 }; 4569 4570 /* 4571 * This function progressively scans the array of free objects (with 4572 * a limited look ahead) and extracts objects belonging to the same 4573 * slab. It builds a detached freelist directly within the given 4574 * slab/objects. This can happen without any need for 4575 * synchronization, because the objects are owned by the running process. 4576 * The freelist is built up as a singly linked list in the objects. 4577 * The idea is that this detached freelist can then be bulk 4578 * transferred to the real freelist(s), requiring only a single 4579 * synchronization primitive. Look ahead in the array is limited for 4580 * performance reasons.
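 *
 * Illustrative example (hypothetical objects): for p[] = {B1, A1, A2, A3}
 * with the A* objects in one slab, a single call links A1 -> A2 -> A3
 * into a detached freelist (df->cnt == 3, tail A3), leaves B1 in the
 * array and returns 1, so the caller's next iteration handles B1.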
4581 */ 4582 static inline 4583 int build_detached_freelist(struct kmem_cache *s, size_t size, 4584 void **p, struct detached_freelist *df) 4585 { 4586 int lookahead = 3; 4587 void *object; 4588 struct folio *folio; 4589 size_t same; 4590 4591 object = p[--size]; 4592 folio = virt_to_folio(object); 4593 if (!s) { 4594 /* Handle kmalloc'ed objects */ 4595 if (unlikely(!folio_test_slab(folio))) { 4596 free_large_kmalloc(folio, object); 4597 df->slab = NULL; 4598 return size; 4599 } 4600 /* Derive kmem_cache from object */ 4601 df->slab = folio_slab(folio); 4602 df->s = df->slab->slab_cache; 4603 } else { 4604 df->slab = folio_slab(folio); 4605 df->s = cache_from_obj(s, object); /* Support for memcg */ 4606 } 4607 4608 /* Start new detached freelist */ 4609 df->tail = object; 4610 df->freelist = object; 4611 df->cnt = 1; 4612 4613 if (is_kfence_address(object)) 4614 return size; 4615 4616 set_freepointer(df->s, object, NULL); 4617 4618 same = size; 4619 while (size) { 4620 object = p[--size]; 4621 /* df->slab is always set at this point */ 4622 if (df->slab == virt_to_slab(object)) { 4623 /* Opportunistically build freelist */ 4624 set_freepointer(df->s, object, df->freelist); 4625 df->freelist = object; 4626 df->cnt++; 4627 same--; 4628 if (size != same) 4629 swap(p[size], p[same]); 4630 continue; 4631 } 4632 4633 /* Limit look ahead search */ 4634 if (!--lookahead) 4635 break; 4636 } 4637 4638 return same; 4639 } 4640 4641 /* 4642 * Internal bulk free of objects that were not initialised by the post alloc 4643 * hooks and thus should not be processed by the free hooks 4644 */ 4645 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4646 { 4647 if (!size) 4648 return; 4649 4650 do { 4651 struct detached_freelist df; 4652 4653 size = build_detached_freelist(s, size, p, &df); 4654 if (!df.slab) 4655 continue; 4656 4657 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, 4658 _RET_IP_); 4659 } while (likely(size)); 4660 } 4661 4662 /* Note that interrupts must be enabled when calling this function. */ 4663 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4664 { 4665 if (!size) 4666 return; 4667 4668 do { 4669 struct detached_freelist df; 4670 4671 size = build_detached_freelist(s, size, p, &df); 4672 if (!df.slab) 4673 continue; 4674 4675 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size], 4676 df.cnt, _RET_IP_); 4677 } while (likely(size)); 4678 } 4679 EXPORT_SYMBOL(kmem_cache_free_bulk); 4680 4681 #ifndef CONFIG_SLUB_TINY 4682 static inline 4683 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 4684 void **p) 4685 { 4686 struct kmem_cache_cpu *c; 4687 unsigned long irqflags; 4688 int i; 4689 4690 /* 4691 * Drain objects in the per cpu slab, while disabling local 4692 * IRQs, which protects against PREEMPT and interrupt 4693 * handlers invoking the normal fastpath. 4694 */ 4695 c = slub_get_cpu_ptr(s->cpu_slab); 4696 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4697 4698 for (i = 0; i < size; i++) { 4699 void *object = kfence_alloc(s, s->object_size, flags); 4700 4701 if (unlikely(object)) { 4702 p[i] = object; 4703 continue; 4704 } 4705 4706 object = c->freelist; 4707 if (unlikely(!object)) { 4708 /* 4709 * We may have removed an object from c->freelist using 4710 * the fastpath in the previous iteration; in that case, 4711 * c->tid has not been bumped yet. 4712 * Since ___slab_alloc() may reenable interrupts while 4713 * allocating memory, we should bump c->tid now.
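 * Bumping the tid pairs the earlier c->freelist updates with a new
 * tid, so any lockless operation that sampled the old (freelist, tid)
 * pair fails its cmpxchg and retries instead of hitting an ABA window.
 * (Illustrative note, inferred from the tid scheme described earlier.)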
4714 */ 4715 c->tid = next_tid(c->tid); 4716 4717 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4718 4719 /* 4720 * Invoking the slow path likely has the side effect 4721 * of re-populating the per CPU c->freelist 4722 */ 4723 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 4724 _RET_IP_, c, s->object_size); 4725 if (unlikely(!p[i])) 4726 goto error; 4727 4728 c = this_cpu_ptr(s->cpu_slab); 4729 maybe_wipe_obj_freeptr(s, p[i]); 4730 4731 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4732 4733 continue; /* goto for-loop */ 4734 } 4735 c->freelist = get_freepointer(s, object); 4736 p[i] = object; 4737 maybe_wipe_obj_freeptr(s, p[i]); 4738 stat(s, ALLOC_FASTPATH); 4739 } 4740 c->tid = next_tid(c->tid); 4741 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4742 slub_put_cpu_ptr(s->cpu_slab); 4743 4744 return i; 4745 4746 error: 4747 slub_put_cpu_ptr(s->cpu_slab); 4748 __kmem_cache_free_bulk(s, i, p); 4749 return 0; 4750 4751 } 4752 #else /* CONFIG_SLUB_TINY */ 4753 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 4754 size_t size, void **p) 4755 { 4756 int i; 4757 4758 for (i = 0; i < size; i++) { 4759 void *object = kfence_alloc(s, s->object_size, flags); 4760 4761 if (unlikely(object)) { 4762 p[i] = object; 4763 continue; 4764 } 4765 4766 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, 4767 _RET_IP_, s->object_size); 4768 if (unlikely(!p[i])) 4769 goto error; 4770 4771 maybe_wipe_obj_freeptr(s, p[i]); 4772 } 4773 4774 return i; 4775 4776 error: 4777 __kmem_cache_free_bulk(s, i, p); 4778 return 0; 4779 } 4780 #endif /* CONFIG_SLUB_TINY */ 4781 4782 /* Note that interrupts must be enabled when calling this function. */ 4783 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, 4784 void **p) 4785 { 4786 int i; 4787 4788 if (!size) 4789 return 0; 4790 4791 s = slab_pre_alloc_hook(s, flags); 4792 if (unlikely(!s)) 4793 return 0; 4794 4795 i = __kmem_cache_alloc_bulk(s, flags, size, p); 4796 if (unlikely(i == 0)) 4797 return 0; 4798 4799 /* 4800 * memcg and kmem_cache debug support and memory initialization. 4801 * Done outside of the IRQ disabled fastpath loop. 4802 */ 4803 if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p, 4804 slab_want_init_on_alloc(flags, s), s->object_size))) { 4805 return 0; 4806 } 4807 return i; 4808 } 4809 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof); 4810 4811 4812 /* 4813 * Object placement in a slab is made very easy because we always start at 4814 * offset 0. If we tune the size of the object to the alignment then we can 4815 * get the required alignment by putting one properly sized object after 4816 * another. 4817 * 4818 * Notice that the allocation order determines the sizes of the per cpu 4819 * caches. Each processor always has one slab available for allocations. 4820 * Increasing the allocation order reduces the number of times that slabs 4821 * must be moved on and off the partial lists and is therefore a factor in 4822 * locking overhead. 4823 */ 4824 4825 /* 4826 * Minimum / Maximum order of slab pages. This influences locking overhead 4827 * and slab fragmentation. A higher order reduces the number of partial slabs 4828 * and increases the number of allocations possible without having to 4829 * take the list_lock. 4830 */ 4831 static unsigned int slub_min_order; 4832 static unsigned int slub_max_order = 4833 IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER; 4834 static unsigned int slub_min_objects; 4835 4836 /* 4837 * Calculate the order of allocation given a slab object size.
4838 * 4839 * The order of allocation has significant impact on performance and other 4840 * system components. Generally order 0 allocations should be preferred since 4841 * order 0 does not cause fragmentation in the page allocator. Larger objects 4842 * can be problematic to put into order 0 slabs because there may be too much 4843 * unused space left. We go to a higher order if more than 1/16th of the slab 4844 * would be wasted. 4845 * 4846 * In order to reach satisfactory performance we must ensure that a minimum 4847 * number of objects is in one slab. Otherwise we may generate too much 4848 * activity on the partial lists which requires taking the list_lock. This is 4849 * less a concern for large slabs though which are rarely used. 4850 * 4851 * slab_max_order specifies the order where we begin to stop considering the 4852 * number of objects in a slab as critical. If we reach slab_max_order then 4853 * we try to keep the page order as low as possible. So we accept more waste 4854 * of space in favor of a small page order. 4855 * 4856 * Higher order allocations also allow the placement of more objects in a 4857 * slab and thereby reduce object handling overhead. If the user has 4858 * requested a higher minimum order then we start with that one instead of 4859 * the smallest order which will fit the object. 4860 */ 4861 static inline unsigned int calc_slab_order(unsigned int size, 4862 unsigned int min_order, unsigned int max_order, 4863 unsigned int fract_leftover) 4864 { 4865 unsigned int order; 4866 4867 for (order = min_order; order <= max_order; order++) { 4868 4869 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 4870 unsigned int rem; 4871 4872 rem = slab_size % size; 4873 4874 if (rem <= slab_size / fract_leftover) 4875 break; 4876 } 4877 4878 return order; 4879 } 4880 4881 static inline int calculate_order(unsigned int size) 4882 { 4883 unsigned int order; 4884 unsigned int min_objects; 4885 unsigned int max_objects; 4886 unsigned int min_order; 4887 4888 min_objects = slub_min_objects; 4889 if (!min_objects) { 4890 /* 4891 * Some architectures will only update present cpus when 4892 * onlining them, so don't trust the number if it's just 1. But 4893 * we also don't want to use nr_cpu_ids always, as on some other 4894 * architectures, there can be many possible cpus, but never 4895 * onlined. Here we compromise between trying to avoid too high 4896 * order on systems that appear larger than they are, and too 4897 * low order on systems that appear smaller than they are. 4898 */ 4899 unsigned int nr_cpus = num_present_cpus(); 4900 if (nr_cpus <= 1) 4901 nr_cpus = nr_cpu_ids; 4902 min_objects = 4 * (fls(nr_cpus) + 1); 4903 } 4904 /* min_objects can't be 0 because get_order(0) is undefined */ 4905 max_objects = max(order_objects(slub_max_order, size), 1U); 4906 min_objects = min(min_objects, max_objects); 4907 4908 min_order = max_t(unsigned int, slub_min_order, 4909 get_order(min_objects * size)); 4910 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 4911 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 4912 4913 /* 4914 * Attempt to find best configuration for a slab. This works by first 4915 * attempting to generate a layout with the best possible configuration 4916 * and backing off gradually. 4917 * 4918 * We start with accepting at most 1/16 waste and try to find the 4919 * smallest order from min_objects-derived/slab_min_order up to 4920 * slab_max_order that will satisfy the constraint.
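 * As an illustrative worked example (assuming 4K pages, fraction 16,
 * size == 700): order 0 leaves 4096 % 700 == 596 bytes unused, more
 * than 4096/16 == 256, while order 1 leaves 8192 % 700 == 492 bytes,
 * within 8192/16 == 512, so calc_slab_order() settles on order 1.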
Note that increasing 4921 * the order can only result in same or less fractional waste, not more. 4922 * 4923 * If that fails, we increase the acceptable fraction of waste and try 4924 * again. The last iteration with fraction of 1/2 would effectively 4925 * accept any waste and give us the order determined by min_objects, as 4926 * long as at least single object fits within slab_max_order. 4927 */ 4928 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) { 4929 order = calc_slab_order(size, min_order, slub_max_order, 4930 fraction); 4931 if (order <= slub_max_order) 4932 return order; 4933 } 4934 4935 /* 4936 * Doh this slab cannot be placed using slab_max_order. 4937 */ 4938 order = get_order(size); 4939 if (order <= MAX_PAGE_ORDER) 4940 return order; 4941 return -ENOSYS; 4942 } 4943 4944 static void 4945 init_kmem_cache_node(struct kmem_cache_node *n) 4946 { 4947 n->nr_partial = 0; 4948 spin_lock_init(&n->list_lock); 4949 INIT_LIST_HEAD(&n->partial); 4950 #ifdef CONFIG_SLUB_DEBUG 4951 atomic_long_set(&n->nr_slabs, 0); 4952 atomic_long_set(&n->total_objects, 0); 4953 INIT_LIST_HEAD(&n->full); 4954 #endif 4955 } 4956 4957 #ifndef CONFIG_SLUB_TINY 4958 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 4959 { 4960 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 4961 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * 4962 sizeof(struct kmem_cache_cpu)); 4963 4964 /* 4965 * Must align to double word boundary for the double cmpxchg 4966 * instructions to work; see __pcpu_double_call_return_bool(). 4967 */ 4968 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 4969 2 * sizeof(void *)); 4970 4971 if (!s->cpu_slab) 4972 return 0; 4973 4974 init_kmem_cache_cpus(s); 4975 4976 return 1; 4977 } 4978 #else 4979 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 4980 { 4981 return 1; 4982 } 4983 #endif /* CONFIG_SLUB_TINY */ 4984 4985 static struct kmem_cache *kmem_cache_node; 4986 4987 /* 4988 * No kmalloc_node yet so do it by hand. We know that this is the first 4989 * slab on the node for this slabcache. There are no concurrent accesses 4990 * possible. 4991 * 4992 * Note that this function only works on the kmem_cache_node 4993 * when allocating for the kmem_cache_node. This is used for bootstrapping 4994 * memory on a fresh node that has no slab structures yet. 4995 */ 4996 static void early_kmem_cache_node_alloc(int node) 4997 { 4998 struct slab *slab; 4999 struct kmem_cache_node *n; 5000 5001 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 5002 5003 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 5004 5005 BUG_ON(!slab); 5006 if (slab_nid(slab) != node) { 5007 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 5008 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 5009 } 5010 5011 n = slab->freelist; 5012 BUG_ON(!n); 5013 #ifdef CONFIG_SLUB_DEBUG 5014 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 5015 #endif 5016 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 5017 slab->freelist = get_freepointer(kmem_cache_node, n); 5018 slab->inuse = 1; 5019 kmem_cache_node->node[node] = n; 5020 init_kmem_cache_node(n); 5021 inc_slabs_node(kmem_cache_node, node, slab->objects); 5022 5023 /* 5024 * No locks need to be taken here as it has just been 5025 * initialized and there is no concurrent access. 
5026 */ 5027 __add_partial(n, slab, DEACTIVATE_TO_HEAD); 5028 } 5029 5030 static void free_kmem_cache_nodes(struct kmem_cache *s) 5031 { 5032 int node; 5033 struct kmem_cache_node *n; 5034 5035 for_each_kmem_cache_node(s, node, n) { 5036 s->node[node] = NULL; 5037 kmem_cache_free(kmem_cache_node, n); 5038 } 5039 } 5040 5041 void __kmem_cache_release(struct kmem_cache *s) 5042 { 5043 cache_random_seq_destroy(s); 5044 #ifndef CONFIG_SLUB_TINY 5045 free_percpu(s->cpu_slab); 5046 #endif 5047 free_kmem_cache_nodes(s); 5048 } 5049 5050 static int init_kmem_cache_nodes(struct kmem_cache *s) 5051 { 5052 int node; 5053 5054 for_each_node_mask(node, slab_nodes) { 5055 struct kmem_cache_node *n; 5056 5057 if (slab_state == DOWN) { 5058 early_kmem_cache_node_alloc(node); 5059 continue; 5060 } 5061 n = kmem_cache_alloc_node(kmem_cache_node, 5062 GFP_KERNEL, node); 5063 5064 if (!n) { 5065 free_kmem_cache_nodes(s); 5066 return 0; 5067 } 5068 5069 init_kmem_cache_node(n); 5070 s->node[node] = n; 5071 } 5072 return 1; 5073 } 5074 5075 static void set_cpu_partial(struct kmem_cache *s) 5076 { 5077 #ifdef CONFIG_SLUB_CPU_PARTIAL 5078 unsigned int nr_objects; 5079 5080 /* 5081 * cpu_partial determines the maximum number of objects kept in the 5082 * per cpu partial lists of a processor. 5083 * 5084 * Per cpu partial lists mainly contain slabs that just have one 5085 * object freed. If they are used for allocation then they can be 5086 * filled up again with minimal effort. The slab will never hit the 5087 * per node partial lists and therefore no locking will be required. 5088 * 5089 * For backwards compatibility reasons, this is determined as a number 5090 * of objects, even though we now limit the maximum number of pages; see 5091 * slub_set_cpu_partial() 5092 */ 5093 if (!kmem_cache_has_cpu_partial(s)) 5094 nr_objects = 0; 5095 else if (s->size >= PAGE_SIZE) 5096 nr_objects = 6; 5097 else if (s->size >= 1024) 5098 nr_objects = 24; 5099 else if (s->size >= 256) 5100 nr_objects = 52; 5101 else 5102 nr_objects = 120; 5103 5104 slub_set_cpu_partial(s, nr_objects); 5105 #endif 5106 } 5107 5108 /* 5109 * calculate_sizes() determines the order and the distribution of data within 5110 * a slab object. 5111 */ 5112 static int calculate_sizes(struct kmem_cache *s) 5113 { 5114 slab_flags_t flags = s->flags; 5115 unsigned int size = s->object_size; 5116 unsigned int order; 5117 5118 /* 5119 * Round up object size to the next word boundary. We can only 5120 * place the free pointer at word boundaries and this determines 5121 * the possible location of the free pointer. 5122 */ 5123 size = ALIGN(size, sizeof(void *)); 5124 5125 #ifdef CONFIG_SLUB_DEBUG 5126 /* 5127 * Determine if we can poison the object itself. If the user of 5128 * the slab may touch the object after free or before allocation 5129 * then we should never poison the object itself. 5130 */ 5131 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 5132 !s->ctor) 5133 s->flags |= __OBJECT_POISON; 5134 else 5135 s->flags &= ~__OBJECT_POISON; 5136 5137 5138 /* 5139 * If we are Redzoning then check if there is some space between the 5140 * end of the object and the free pointer. If not then add an 5141 * additional word to have some bytes to store Redzone information. 5142 */ 5143 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 5144 size += sizeof(void *); 5145 #endif 5146 5147 /* 5148 * With that we have determined the number of bytes in actual use 5149 * by the object and redzoning.
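 *
 * An illustrative layout at this point (assuming a 64-bit build with
 * SLAB_RED_ZONE and SLAB_STORE_USER; widths are not to scale):
 *
 *	[red_left_pad][object_size bytes][right redzone word]
 *	[free pointer, if relocated][2 x struct track][alignment pad]
 *
 * s->inuse covers only the object plus the right redzone; s->size is
 * the final per-object stride after alignment.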
5150 */ 5151 s->inuse = size; 5152 5153 if (slub_debug_orig_size(s) || 5154 (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 5155 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || 5156 s->ctor) { 5157 /* 5158 * Relocate free pointer after the object if it is not 5159 * permitted to overwrite the first word of the object on 5160 * kmem_cache_free. 5161 * 5162 * This is the case if we do RCU, have a constructor or 5163 * destructor, are poisoning the objects, or are 5164 * redzoning an object smaller than sizeof(void *). 5165 * 5166 * The assumption that s->offset >= s->inuse means free 5167 * pointer is outside of the object is used in the 5168 * freeptr_outside_object() function. If that is no 5169 * longer true, the function needs to be modified. 5170 */ 5171 s->offset = size; 5172 size += sizeof(void *); 5173 } else { 5174 /* 5175 * Store freelist pointer near middle of object to keep 5176 * it away from the edges of the object to avoid small 5177 * sized over/underflows from neighboring allocations. 5178 */ 5179 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 5180 } 5181 5182 #ifdef CONFIG_SLUB_DEBUG 5183 if (flags & SLAB_STORE_USER) { 5184 /* 5185 * Need to store information about allocs and frees after 5186 * the object. 5187 */ 5188 size += 2 * sizeof(struct track); 5189 5190 /* Save the original kmalloc request size */ 5191 if (flags & SLAB_KMALLOC) 5192 size += sizeof(unsigned int); 5193 } 5194 #endif 5195 5196 kasan_cache_create(s, &size, &s->flags); 5197 #ifdef CONFIG_SLUB_DEBUG 5198 if (flags & SLAB_RED_ZONE) { 5199 /* 5200 * Add some empty padding so that we can catch 5201 * overwrites from earlier objects rather than let 5202 * tracking information or the free pointer be 5203 * corrupted if a user writes before the start 5204 * of the object. 5205 */ 5206 size += sizeof(void *); 5207 5208 s->red_left_pad = sizeof(void *); 5209 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 5210 size += s->red_left_pad; 5211 } 5212 #endif 5213 5214 /* 5215 * SLUB stores one object immediately after another beginning from 5216 * offset 0. In order to align the objects we have to simply size 5217 * each object to conform to the alignment. 5218 */ 5219 size = ALIGN(size, s->align); 5220 s->size = size; 5221 s->reciprocal_size = reciprocal_value(size); 5222 order = calculate_order(size); 5223 5224 if ((int)order < 0) 5225 return 0; 5226 5227 s->allocflags = __GFP_COMP; 5228 5229 if (s->flags & SLAB_CACHE_DMA) 5230 s->allocflags |= GFP_DMA; 5231 5232 if (s->flags & SLAB_CACHE_DMA32) 5233 s->allocflags |= GFP_DMA32; 5234 5235 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5236 s->allocflags |= __GFP_RECLAIMABLE; 5237 5238 /* 5239 * Determine the number of objects per slab 5240 */ 5241 s->oo = oo_make(order, size); 5242 s->min = oo_make(get_order(size), size); 5243 5244 return !!oo_objects(s->oo); 5245 } 5246 5247 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 5248 { 5249 s->flags = kmem_cache_flags(flags, s->name); 5250 #ifdef CONFIG_SLAB_FREELIST_HARDENED 5251 s->random = get_random_long(); 5252 #endif 5253 5254 if (!calculate_sizes(s)) 5255 goto error; 5256 if (disable_higher_order_debug) { 5257 /* 5258 * Disable debugging flags that store metadata if the min slab 5259 * order increased. 
5260 */ 5261 if (get_order(s->size) > get_order(s->object_size)) { 5262 s->flags &= ~DEBUG_METADATA_FLAGS; 5263 s->offset = 0; 5264 if (!calculate_sizes(s)) 5265 goto error; 5266 } 5267 } 5268 5269 #ifdef system_has_freelist_aba 5270 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { 5271 /* Enable fast mode */ 5272 s->flags |= __CMPXCHG_DOUBLE; 5273 } 5274 #endif 5275 5276 /* 5277 * The larger the object size is, the more slabs we want on the partial 5278 * list to avoid pounding the page allocator excessively. 5279 */ 5280 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 5281 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 5282 5283 set_cpu_partial(s); 5284 5285 #ifdef CONFIG_NUMA 5286 s->remote_node_defrag_ratio = 1000; 5287 #endif 5288 5289 /* Initialize the pre-computed randomized freelist if slab is up */ 5290 if (slab_state >= UP) { 5291 if (init_cache_random_seq(s)) 5292 goto error; 5293 } 5294 5295 if (!init_kmem_cache_nodes(s)) 5296 goto error; 5297 5298 if (alloc_kmem_cache_cpus(s)) 5299 return 0; 5300 5301 error: 5302 __kmem_cache_release(s); 5303 return -EINVAL; 5304 } 5305 5306 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, 5307 const char *text) 5308 { 5309 #ifdef CONFIG_SLUB_DEBUG 5310 void *addr = slab_address(slab); 5311 void *p; 5312 5313 slab_err(s, slab, text, s->name); 5314 5315 spin_lock(&object_map_lock); 5316 __fill_map(object_map, s, slab); 5317 5318 for_each_object(p, s, addr, slab->objects) { 5319 5320 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { 5321 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 5322 print_tracking(s, p); 5323 } 5324 } 5325 spin_unlock(&object_map_lock); 5326 #endif 5327 } 5328 5329 /* 5330 * Attempt to free all partial slabs on a node. 5331 * This is called from __kmem_cache_shutdown(). We must take list_lock 5332 * because sysfs files might still access the partial list after shutdown. 5333 */ 5334 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 5335 { 5336 LIST_HEAD(discard); 5337 struct slab *slab, *h; 5338 5339 BUG_ON(irqs_disabled()); 5340 spin_lock_irq(&n->list_lock); 5341 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 5342 if (!slab->inuse) { 5343 remove_partial(n, slab); 5344 list_add(&slab->slab_list, &discard); 5345 } else { 5346 list_slab_objects(s, slab, 5347 "Objects remaining in %s on __kmem_cache_shutdown()"); 5348 } 5349 } 5350 spin_unlock_irq(&n->list_lock); 5351 5352 list_for_each_entry_safe(slab, h, &discard, slab_list) 5353 discard_slab(s, slab); 5354 } 5355 5356 bool __kmem_cache_empty(struct kmem_cache *s) 5357 { 5358 int node; 5359 struct kmem_cache_node *n; 5360 5361 for_each_kmem_cache_node(s, node, n) 5362 if (n->nr_partial || node_nr_slabs(n)) 5363 return false; 5364 return true; 5365 } 5366 5367 /* 5368 * Release all resources used by a slab cache.
5369 */ 5370 int __kmem_cache_shutdown(struct kmem_cache *s) 5371 { 5372 int node; 5373 struct kmem_cache_node *n; 5374 5375 flush_all_cpus_locked(s); 5376 /* Attempt to free all objects */ 5377 for_each_kmem_cache_node(s, node, n) { 5378 free_partial(s, n); 5379 if (n->nr_partial || node_nr_slabs(n)) 5380 return 1; 5381 } 5382 return 0; 5383 } 5384 5385 #ifdef CONFIG_PRINTK 5386 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 5387 { 5388 void *base; 5389 int __maybe_unused i; 5390 unsigned int objnr; 5391 void *objp; 5392 void *objp0; 5393 struct kmem_cache *s = slab->slab_cache; 5394 struct track __maybe_unused *trackp; 5395 5396 kpp->kp_ptr = object; 5397 kpp->kp_slab = slab; 5398 kpp->kp_slab_cache = s; 5399 base = slab_address(slab); 5400 objp0 = kasan_reset_tag(object); 5401 #ifdef CONFIG_SLUB_DEBUG 5402 objp = restore_red_left(s, objp0); 5403 #else 5404 objp = objp0; 5405 #endif 5406 objnr = obj_to_index(s, slab, objp); 5407 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 5408 objp = base + s->size * objnr; 5409 kpp->kp_objp = objp; 5410 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 5411 || (objp - base) % s->size) || 5412 !(s->flags & SLAB_STORE_USER)) 5413 return; 5414 #ifdef CONFIG_SLUB_DEBUG 5415 objp = fixup_red_left(s, objp); 5416 trackp = get_track(s, objp, TRACK_ALLOC); 5417 kpp->kp_ret = (void *)trackp->addr; 5418 #ifdef CONFIG_STACKDEPOT 5419 { 5420 depot_stack_handle_t handle; 5421 unsigned long *entries; 5422 unsigned int nr_entries; 5423 5424 handle = READ_ONCE(trackp->handle); 5425 if (handle) { 5426 nr_entries = stack_depot_fetch(handle, &entries); 5427 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5428 kpp->kp_stack[i] = (void *)entries[i]; 5429 } 5430 5431 trackp = get_track(s, objp, TRACK_FREE); 5432 handle = READ_ONCE(trackp->handle); 5433 if (handle) { 5434 nr_entries = stack_depot_fetch(handle, &entries); 5435 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5436 kpp->kp_free_stack[i] = (void *)entries[i]; 5437 } 5438 } 5439 #endif 5440 #endif 5441 } 5442 #endif 5443 5444 /******************************************************************** 5445 * Kmalloc subsystem 5446 *******************************************************************/ 5447 5448 static int __init setup_slub_min_order(char *str) 5449 { 5450 get_option(&str, (int *)&slub_min_order); 5451 5452 if (slub_min_order > slub_max_order) 5453 slub_max_order = slub_min_order; 5454 5455 return 1; 5456 } 5457 5458 __setup("slab_min_order=", setup_slub_min_order); 5459 __setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0); 5460 5461 5462 static int __init setup_slub_max_order(char *str) 5463 { 5464 get_option(&str, (int *)&slub_max_order); 5465 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER); 5466 5467 if (slub_min_order > slub_max_order) 5468 slub_min_order = slub_max_order; 5469 5470 return 1; 5471 } 5472 5473 __setup("slab_max_order=", setup_slub_max_order); 5474 __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0); 5475 5476 static int __init setup_slub_min_objects(char *str) 5477 { 5478 get_option(&str, (int *)&slub_min_objects); 5479 5480 return 1; 5481 } 5482 5483 __setup("slab_min_objects=", setup_slub_min_objects); 5484 __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0); 5485 5486 #ifdef CONFIG_HARDENED_USERCOPY 5487 /* 5488 * Rejects incorrectly sized objects and objects that are to be copied 5489 * to/from userspace 
but do not fall entirely within the containing slab 5490 * cache's usercopy region. 5491 * 5492 * Returns NULL if check passes, otherwise const char * to name of cache 5493 * to indicate an error. 5494 */ 5495 void __check_heap_object(const void *ptr, unsigned long n, 5496 const struct slab *slab, bool to_user) 5497 { 5498 struct kmem_cache *s; 5499 unsigned int offset; 5500 bool is_kfence = is_kfence_address(ptr); 5501 5502 ptr = kasan_reset_tag(ptr); 5503 5504 /* Find object and usable object size. */ 5505 s = slab->slab_cache; 5506 5507 /* Reject impossible pointers. */ 5508 if (ptr < slab_address(slab)) 5509 usercopy_abort("SLUB object not in SLUB page?!", NULL, 5510 to_user, 0, n); 5511 5512 /* Find offset within object. */ 5513 if (is_kfence) 5514 offset = ptr - kfence_object_start(ptr); 5515 else 5516 offset = (ptr - slab_address(slab)) % s->size; 5517 5518 /* Adjust for redzone and reject if within the redzone. */ 5519 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 5520 if (offset < s->red_left_pad) 5521 usercopy_abort("SLUB object in left red zone", 5522 s->name, to_user, offset, n); 5523 offset -= s->red_left_pad; 5524 } 5525 5526 /* Allow address range falling entirely within usercopy region. */ 5527 if (offset >= s->useroffset && 5528 offset - s->useroffset <= s->usersize && 5529 n <= s->useroffset - offset + s->usersize) 5530 return; 5531 5532 usercopy_abort("SLUB object", s->name, to_user, offset, n); 5533 } 5534 #endif /* CONFIG_HARDENED_USERCOPY */ 5535 5536 #define SHRINK_PROMOTE_MAX 32 5537 5538 /* 5539 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 5540 * up most to the head of the partial lists. New allocations will then 5541 * fill those up and thus they can be removed from the partial lists. 5542 * 5543 * The slabs with the least items are placed last. This results in them 5544 * being allocated from last increasing the chance that the last objects 5545 * are freed in them. 5546 */ 5547 static int __kmem_cache_do_shrink(struct kmem_cache *s) 5548 { 5549 int node; 5550 int i; 5551 struct kmem_cache_node *n; 5552 struct slab *slab; 5553 struct slab *t; 5554 struct list_head discard; 5555 struct list_head promote[SHRINK_PROMOTE_MAX]; 5556 unsigned long flags; 5557 int ret = 0; 5558 5559 for_each_kmem_cache_node(s, node, n) { 5560 INIT_LIST_HEAD(&discard); 5561 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 5562 INIT_LIST_HEAD(promote + i); 5563 5564 spin_lock_irqsave(&n->list_lock, flags); 5565 5566 /* 5567 * Build lists of slabs to discard or promote. 5568 * 5569 * Note that concurrent frees may occur while we hold the 5570 * list_lock. slab->inuse here is the upper limit. 5571 */ 5572 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 5573 int free = slab->objects - slab->inuse; 5574 5575 /* Do not reread slab->inuse */ 5576 barrier(); 5577 5578 /* We do not keep full slabs on the list */ 5579 BUG_ON(free <= 0); 5580 5581 if (free == slab->objects) { 5582 list_move(&slab->slab_list, &discard); 5583 slab_clear_node_partial(slab); 5584 n->nr_partial--; 5585 dec_slabs_node(s, node, slab->objects); 5586 } else if (free <= SHRINK_PROMOTE_MAX) 5587 list_move(&slab->slab_list, promote + free - 1); 5588 } 5589 5590 /* 5591 * Promote the slabs filled up most to the head of the 5592 * partial list. 
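 * Splicing in descending index order means the free == 1 list is
 * spliced last and thus ends up first, so the fullest slabs sit at
 * the head and are allocated from first. (Illustrative note.)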
5593 */ 5594 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 5595 list_splice(promote + i, &n->partial); 5596 5597 spin_unlock_irqrestore(&n->list_lock, flags); 5598 5599 /* Release empty slabs */ 5600 list_for_each_entry_safe(slab, t, &discard, slab_list) 5601 free_slab(s, slab); 5602 5603 if (node_nr_slabs(n)) 5604 ret = 1; 5605 } 5606 5607 return ret; 5608 } 5609 5610 int __kmem_cache_shrink(struct kmem_cache *s) 5611 { 5612 flush_all(s); 5613 return __kmem_cache_do_shrink(s); 5614 } 5615 5616 static int slab_mem_going_offline_callback(void *arg) 5617 { 5618 struct kmem_cache *s; 5619 5620 mutex_lock(&slab_mutex); 5621 list_for_each_entry(s, &slab_caches, list) { 5622 flush_all_cpus_locked(s); 5623 __kmem_cache_do_shrink(s); 5624 } 5625 mutex_unlock(&slab_mutex); 5626 5627 return 0; 5628 } 5629 5630 static void slab_mem_offline_callback(void *arg) 5631 { 5632 struct memory_notify *marg = arg; 5633 int offline_node; 5634 5635 offline_node = marg->status_change_nid_normal; 5636 5637 /* 5638 * If the node still has available memory, its kmem_cache_node is 5639 * still needed. 5640 */ 5641 if (offline_node < 0) 5642 return; 5643 5644 mutex_lock(&slab_mutex); 5645 node_clear(offline_node, slab_nodes); 5646 /* 5647 * We no longer free kmem_cache_node structures here, as it would be 5648 * racy with all get_node() users, and infeasible to protect them with 5649 * slab_mutex. 5650 */ 5651 mutex_unlock(&slab_mutex); 5652 } 5653 5654 static int slab_mem_going_online_callback(void *arg) 5655 { 5656 struct kmem_cache_node *n; 5657 struct kmem_cache *s; 5658 struct memory_notify *marg = arg; 5659 int nid = marg->status_change_nid_normal; 5660 int ret = 0; 5661 5662 /* 5663 * If the node's memory is already available, then kmem_cache_node is 5664 * already created. Nothing to do. 5665 */ 5666 if (nid < 0) 5667 return 0; 5668 5669 /* 5670 * We are bringing a node online. No memory is available yet. We must 5671 * allocate a kmem_cache_node structure in order to bring the node 5672 * online. 5673 */ 5674 mutex_lock(&slab_mutex); 5675 list_for_each_entry(s, &slab_caches, list) { 5676 /* 5677 * The structure may already exist if the node was previously 5678 * onlined and offlined. 5679 */ 5680 if (get_node(s, nid)) 5681 continue; 5682 /* 5683 * XXX: kmem_cache_alloc_node will fall back to other nodes 5684 * since memory is not yet available from the node that 5685 * is brought up. 5686 */ 5687 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 5688 if (!n) { 5689 ret = -ENOMEM; 5690 goto out; 5691 } 5692 init_kmem_cache_node(n); 5693 s->node[nid] = n; 5694 } 5695 /* 5696 * Any cache created after this point will also have kmem_cache_node 5697 * initialized for the new node.
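 * (Illustrative note, inferred from the locking: node_set() below runs
 * under slab_mutex, and cache creation also runs under slab_mutex, so
 * a new cache either observes the bit in slab_nodes and allocates its
 * own kmem_cache_node, or is serialized behind this callback.)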
5698 */ 5699 node_set(nid, slab_nodes); 5700 out: 5701 mutex_unlock(&slab_mutex); 5702 return ret; 5703 } 5704 5705 static int slab_memory_callback(struct notifier_block *self, 5706 unsigned long action, void *arg) 5707 { 5708 int ret = 0; 5709 5710 switch (action) { 5711 case MEM_GOING_ONLINE: 5712 ret = slab_mem_going_online_callback(arg); 5713 break; 5714 case MEM_GOING_OFFLINE: 5715 ret = slab_mem_going_offline_callback(arg); 5716 break; 5717 case MEM_OFFLINE: 5718 case MEM_CANCEL_ONLINE: 5719 slab_mem_offline_callback(arg); 5720 break; 5721 case MEM_ONLINE: 5722 case MEM_CANCEL_OFFLINE: 5723 break; 5724 } 5725 if (ret) 5726 ret = notifier_from_errno(ret); 5727 else 5728 ret = NOTIFY_OK; 5729 return ret; 5730 } 5731 5732 /******************************************************************** 5733 * Basic setup of slabs 5734 *******************************************************************/ 5735 5736 /* 5737 * Used for early kmem_cache structures that were allocated using 5738 * the page allocator. Allocate them properly then fix up the pointers 5739 * that may be pointing to the wrong kmem_cache structure. 5740 */ 5741 5742 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 5743 { 5744 int node; 5745 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 5746 struct kmem_cache_node *n; 5747 5748 memcpy(s, static_cache, kmem_cache->object_size); 5749 5750 /* 5751 * This runs very early, and only the boot processor is supposed to be 5752 * up. Even if it weren't true, IRQs are not up so we couldn't fire 5753 * IPIs around. 5754 */ 5755 __flush_cpu_slab(s, smp_processor_id()); 5756 for_each_kmem_cache_node(s, node, n) { 5757 struct slab *p; 5758 5759 list_for_each_entry(p, &n->partial, slab_list) 5760 p->slab_cache = s; 5761 5762 #ifdef CONFIG_SLUB_DEBUG 5763 list_for_each_entry(p, &n->full, slab_list) 5764 p->slab_cache = s; 5765 #endif 5766 } 5767 list_add(&s->list, &slab_caches); 5768 return s; 5769 } 5770 5771 void __init kmem_cache_init(void) 5772 { 5773 static __initdata struct kmem_cache boot_kmem_cache, 5774 boot_kmem_cache_node; 5775 int node; 5776 5777 if (debug_guardpage_minorder()) 5778 slub_max_order = 0; 5779 5780 /* Print slub debugging pointers without hashing */ 5781 if (__slub_debug_enabled()) 5782 no_hash_pointers_enable(NULL); 5783 5784 kmem_cache_node = &boot_kmem_cache_node; 5785 kmem_cache = &boot_kmem_cache; 5786 5787 /* 5788 * Initialize the nodemask for which we will allocate per node 5789 * structures. Here we don't need to take slab_mutex yet.
5790 */ 5791 for_each_node_state(node, N_NORMAL_MEMORY) 5792 node_set(node, slab_nodes); 5793 5794 create_boot_cache(kmem_cache_node, "kmem_cache_node", 5795 sizeof(struct kmem_cache_node), 5796 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 5797 5798 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 5799 5800 /* Able to allocate the per node structures */ 5801 slab_state = PARTIAL; 5802 5803 create_boot_cache(kmem_cache, "kmem_cache", 5804 offsetof(struct kmem_cache, node) + 5805 nr_node_ids * sizeof(struct kmem_cache_node *), 5806 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 5807 5808 kmem_cache = bootstrap(&boot_kmem_cache); 5809 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 5810 5811 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 5812 setup_kmalloc_cache_index_table(); 5813 create_kmalloc_caches(); 5814 5815 /* Setup random freelists for each cache */ 5816 init_freelist_randomization(); 5817 5818 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 5819 slub_cpu_dead); 5820 5821 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 5822 cache_line_size(), 5823 slub_min_order, slub_max_order, slub_min_objects, 5824 nr_cpu_ids, nr_node_ids); 5825 } 5826 5827 void __init kmem_cache_init_late(void) 5828 { 5829 #ifndef CONFIG_SLUB_TINY 5830 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); 5831 WARN_ON(!flushwq); 5832 #endif 5833 } 5834 5835 struct kmem_cache * 5836 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 5837 slab_flags_t flags, void (*ctor)(void *)) 5838 { 5839 struct kmem_cache *s; 5840 5841 s = find_mergeable(size, align, flags, name, ctor); 5842 if (s) { 5843 if (sysfs_slab_alias(s, name)) 5844 return NULL; 5845 5846 s->refcount++; 5847 5848 /* 5849 * Adjust the object sizes so that we clear 5850 * the complete object on kzalloc. 5851 */ 5852 s->object_size = max(s->object_size, size); 5853 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 5854 } 5855 5856 return s; 5857 } 5858 5859 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 5860 { 5861 int err; 5862 5863 err = kmem_cache_open(s, flags); 5864 if (err) 5865 return err; 5866 5867 /* Mutex is not taken during early boot */ 5868 if (slab_state <= UP) 5869 return 0; 5870 5871 err = sysfs_slab_add(s); 5872 if (err) { 5873 __kmem_cache_release(s); 5874 return err; 5875 } 5876 5877 if (s->flags & SLAB_STORE_USER) 5878 debugfs_slab_add(s); 5879 5880 return 0; 5881 } 5882 5883 #ifdef SLAB_SUPPORTS_SYSFS 5884 static int count_inuse(struct slab *slab) 5885 { 5886 return slab->inuse; 5887 } 5888 5889 static int count_total(struct slab *slab) 5890 { 5891 return slab->objects; 5892 } 5893 #endif 5894 5895 #ifdef CONFIG_SLUB_DEBUG 5896 static void validate_slab(struct kmem_cache *s, struct slab *slab, 5897 unsigned long *obj_map) 5898 { 5899 void *p; 5900 void *addr = slab_address(slab); 5901 5902 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 5903 return; 5904 5905 /* Now we know that a valid freelist exists */ 5906 __fill_map(obj_map, s, slab); 5907 for_each_object(p, s, addr, slab->objects) { 5908 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 
5909 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 5910 5911 if (!check_object(s, slab, p, val)) 5912 break; 5913 } 5914 } 5915 5916 static int validate_slab_node(struct kmem_cache *s, 5917 struct kmem_cache_node *n, unsigned long *obj_map) 5918 { 5919 unsigned long count = 0; 5920 struct slab *slab; 5921 unsigned long flags; 5922 5923 spin_lock_irqsave(&n->list_lock, flags); 5924 5925 list_for_each_entry(slab, &n->partial, slab_list) { 5926 validate_slab(s, slab, obj_map); 5927 count++; 5928 } 5929 if (count != n->nr_partial) { 5930 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 5931 s->name, count, n->nr_partial); 5932 slab_add_kunit_errors(); 5933 } 5934 5935 if (!(s->flags & SLAB_STORE_USER)) 5936 goto out; 5937 5938 list_for_each_entry(slab, &n->full, slab_list) { 5939 validate_slab(s, slab, obj_map); 5940 count++; 5941 } 5942 if (count != node_nr_slabs(n)) { 5943 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 5944 s->name, count, node_nr_slabs(n)); 5945 slab_add_kunit_errors(); 5946 } 5947 5948 out: 5949 spin_unlock_irqrestore(&n->list_lock, flags); 5950 return count; 5951 } 5952 5953 long validate_slab_cache(struct kmem_cache *s) 5954 { 5955 int node; 5956 unsigned long count = 0; 5957 struct kmem_cache_node *n; 5958 unsigned long *obj_map; 5959 5960 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 5961 if (!obj_map) 5962 return -ENOMEM; 5963 5964 flush_all(s); 5965 for_each_kmem_cache_node(s, node, n) 5966 count += validate_slab_node(s, n, obj_map); 5967 5968 bitmap_free(obj_map); 5969 5970 return count; 5971 } 5972 EXPORT_SYMBOL(validate_slab_cache); 5973 5974 #ifdef CONFIG_DEBUG_FS 5975 /* 5976 * Generate lists of code addresses where slabcache objects are allocated 5977 * and freed. 5978 */ 5979 5980 struct location { 5981 depot_stack_handle_t handle; 5982 unsigned long count; 5983 unsigned long addr; 5984 unsigned long waste; 5985 long long sum_time; 5986 long min_time; 5987 long max_time; 5988 long min_pid; 5989 long max_pid; 5990 DECLARE_BITMAP(cpus, NR_CPUS); 5991 nodemask_t nodes; 5992 }; 5993 5994 struct loc_track { 5995 unsigned long max; 5996 unsigned long count; 5997 struct location *loc; 5998 loff_t idx; 5999 }; 6000 6001 static struct dentry *slab_debugfs_root; 6002 6003 static void free_loc_track(struct loc_track *t) 6004 { 6005 if (t->max) 6006 free_pages((unsigned long)t->loc, 6007 get_order(sizeof(struct location) * t->max)); 6008 } 6009 6010 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 6011 { 6012 struct location *l; 6013 int order; 6014 6015 order = get_order(sizeof(struct location) * max); 6016 6017 l = (void *)__get_free_pages(flags, order); 6018 if (!l) 6019 return 0; 6020 6021 if (t->count) { 6022 memcpy(l, t->loc, sizeof(struct location) * t->count); 6023 free_loc_track(t); 6024 } 6025 t->max = max; 6026 t->loc = l; 6027 return 1; 6028 } 6029 6030 static int add_location(struct loc_track *t, struct kmem_cache *s, 6031 const struct track *track, 6032 unsigned int orig_size) 6033 { 6034 long start, end, pos; 6035 struct location *l; 6036 unsigned long caddr, chandle, cwaste; 6037 unsigned long age = jiffies - track->when; 6038 depot_stack_handle_t handle = 0; 6039 unsigned int waste = s->object_size - orig_size; 6040 6041 #ifdef CONFIG_STACKDEPOT 6042 handle = READ_ONCE(track->handle); 6043 #endif 6044 start = -1; 6045 end = t->count; 6046 6047 for ( ; ; ) { 6048 pos = start + (end - start + 1) / 2; 6049 6050 /* 6051 * There is nothing at "end". 
If we end up there,
6052 	 * we need to insert the new element before end.
6053 	 */
6054 		if (pos == end)
6055 			break;
6056 
6057 		l = &t->loc[pos];
6058 		caddr = l->addr;
6059 		chandle = l->handle;
6060 		cwaste = l->waste;
6061 		if ((track->addr == caddr) && (handle == chandle) &&
6062 		    (waste == cwaste)) {
6063 
6064 			l->count++;
6065 			if (track->when) {
6066 				l->sum_time += age;
6067 				if (age < l->min_time)
6068 					l->min_time = age;
6069 				if (age > l->max_time)
6070 					l->max_time = age;
6071 
6072 				if (track->pid < l->min_pid)
6073 					l->min_pid = track->pid;
6074 				if (track->pid > l->max_pid)
6075 					l->max_pid = track->pid;
6076 
6077 				cpumask_set_cpu(track->cpu,
6078 						to_cpumask(l->cpus));
6079 			}
6080 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
6081 			return 1;
6082 		}
6083 
6084 		if (track->addr < caddr)
6085 			end = pos;
6086 		else if (track->addr == caddr && handle < chandle)
6087 			end = pos;
6088 		else if (track->addr == caddr && handle == chandle &&
6089 			 waste < cwaste)
6090 			end = pos;
6091 		else
6092 			start = pos;
6093 	}
6094 
6095 	/*
6096 	 * Not found. Insert new tracking element.
6097 	 */
6098 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
6099 		return 0;
6100 
6101 	l = t->loc + pos;
6102 	if (pos < t->count)
6103 		memmove(l + 1, l,
6104 			(t->count - pos) * sizeof(struct location));
6105 	t->count++;
6106 	l->count = 1;
6107 	l->addr = track->addr;
6108 	l->sum_time = age;
6109 	l->min_time = age;
6110 	l->max_time = age;
6111 	l->min_pid = track->pid;
6112 	l->max_pid = track->pid;
6113 	l->handle = handle;
6114 	l->waste = waste;
6115 	cpumask_clear(to_cpumask(l->cpus));
6116 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
6117 	nodes_clear(l->nodes);
6118 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
6119 	return 1;
6120 }
6121 
6122 static void process_slab(struct loc_track *t, struct kmem_cache *s,
6123 			 struct slab *slab, enum track_item alloc,
6124 			 unsigned long *obj_map)
6125 {
6126 	void *addr = slab_address(slab);
6127 	bool is_alloc = (alloc == TRACK_ALLOC);
6128 	void *p;
6129 
6130 	__fill_map(obj_map, s, slab);
6131 
6132 	for_each_object(p, s, addr, slab->objects)
6133 		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
6134 			add_location(t, s, get_track(s, p, alloc),
6135 				     is_alloc ?
get_orig_size(s, p) :
6136 				     s->object_size);
6137 }
6138 #endif  /* CONFIG_DEBUG_FS   */
6139 #endif	/* CONFIG_SLUB_DEBUG */
6140 
6141 #ifdef SLAB_SUPPORTS_SYSFS
6142 enum slab_stat_type {
6143 	SL_ALL,			/* All slabs */
6144 	SL_PARTIAL,		/* Only partially allocated slabs */
6145 	SL_CPU,			/* Only slabs used for cpu caches */
6146 	SL_OBJECTS,		/* Determine allocated objects not slabs */
6147 	SL_TOTAL		/* Determine object capacity not slabs */
6148 };
6149 
6150 #define SO_ALL		(1 << SL_ALL)
6151 #define SO_PARTIAL	(1 << SL_PARTIAL)
6152 #define SO_CPU		(1 << SL_CPU)
6153 #define SO_OBJECTS	(1 << SL_OBJECTS)
6154 #define SO_TOTAL	(1 << SL_TOTAL)
6155 
6156 static ssize_t show_slab_objects(struct kmem_cache *s,
6157 				 char *buf, unsigned long flags)
6158 {
6159 	unsigned long total = 0;
6160 	int node;
6161 	int x;
6162 	unsigned long *nodes;
6163 	int len = 0;
6164 
6165 	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
6166 	if (!nodes)
6167 		return -ENOMEM;
6168 
6169 	if (flags & SO_CPU) {
6170 		int cpu;
6171 
6172 		for_each_possible_cpu(cpu) {
6173 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
6174 							       cpu);
6175 			int node;
6176 			struct slab *slab;
6177 
6178 			slab = READ_ONCE(c->slab);
6179 			if (!slab)
6180 				continue;
6181 
6182 			node = slab_nid(slab);
6183 			if (flags & SO_TOTAL)
6184 				x = slab->objects;
6185 			else if (flags & SO_OBJECTS)
6186 				x = slab->inuse;
6187 			else
6188 				x = 1;
6189 
6190 			total += x;
6191 			nodes[node] += x;
6192 
6193 #ifdef CONFIG_SLUB_CPU_PARTIAL
6194 			slab = slub_percpu_partial_read_once(c);
6195 			if (slab) {
6196 				node = slab_nid(slab);
6197 				if (flags & SO_TOTAL)
6198 					WARN_ON_ONCE(1);
6199 				else if (flags & SO_OBJECTS)
6200 					WARN_ON_ONCE(1);
6201 				else
6202 					x = data_race(slab->slabs);
6203 				total += x;
6204 				nodes[node] += x;
6205 			}
6206 #endif
6207 		}
6208 	}
6209 
6210 	/*
6211 	 * We cannot take "mem_hotplug_lock" here with "kernfs_mutex" already
6212 	 * held, as that would conflict with the existing lock order:
6213 	 *
6214 	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
6215 	 *
6216 	 * We don't really need mem_hotplug_lock (to hold off
6217 	 * slab_mem_going_offline_callback) here because slab's memory hot
6218 	 * unplug code doesn't destroy the kmem_cache->node[] data.
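 *
 * (Taking it would invert that order: this reader would hold
 * kernfs_mutex while waiting for mem_hotplug_lock, while a hotplug
 * writer may hold mem_hotplug_lock and wait for kernfs_mutex via
 * slab_mutex.)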
6219 */ 6220 6221 #ifdef CONFIG_SLUB_DEBUG 6222 if (flags & SO_ALL) { 6223 struct kmem_cache_node *n; 6224 6225 for_each_kmem_cache_node(s, node, n) { 6226 6227 if (flags & SO_TOTAL) 6228 x = node_nr_objs(n); 6229 else if (flags & SO_OBJECTS) 6230 x = node_nr_objs(n) - count_partial(n, count_free); 6231 else 6232 x = node_nr_slabs(n); 6233 total += x; 6234 nodes[node] += x; 6235 } 6236 6237 } else 6238 #endif 6239 if (flags & SO_PARTIAL) { 6240 struct kmem_cache_node *n; 6241 6242 for_each_kmem_cache_node(s, node, n) { 6243 if (flags & SO_TOTAL) 6244 x = count_partial(n, count_total); 6245 else if (flags & SO_OBJECTS) 6246 x = count_partial(n, count_inuse); 6247 else 6248 x = n->nr_partial; 6249 total += x; 6250 nodes[node] += x; 6251 } 6252 } 6253 6254 len += sysfs_emit_at(buf, len, "%lu", total); 6255 #ifdef CONFIG_NUMA 6256 for (node = 0; node < nr_node_ids; node++) { 6257 if (nodes[node]) 6258 len += sysfs_emit_at(buf, len, " N%d=%lu", 6259 node, nodes[node]); 6260 } 6261 #endif 6262 len += sysfs_emit_at(buf, len, "\n"); 6263 kfree(nodes); 6264 6265 return len; 6266 } 6267 6268 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 6269 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 6270 6271 struct slab_attribute { 6272 struct attribute attr; 6273 ssize_t (*show)(struct kmem_cache *s, char *buf); 6274 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 6275 }; 6276 6277 #define SLAB_ATTR_RO(_name) \ 6278 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 6279 6280 #define SLAB_ATTR(_name) \ 6281 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 6282 6283 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 6284 { 6285 return sysfs_emit(buf, "%u\n", s->size); 6286 } 6287 SLAB_ATTR_RO(slab_size); 6288 6289 static ssize_t align_show(struct kmem_cache *s, char *buf) 6290 { 6291 return sysfs_emit(buf, "%u\n", s->align); 6292 } 6293 SLAB_ATTR_RO(align); 6294 6295 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 6296 { 6297 return sysfs_emit(buf, "%u\n", s->object_size); 6298 } 6299 SLAB_ATTR_RO(object_size); 6300 6301 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 6302 { 6303 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 6304 } 6305 SLAB_ATTR_RO(objs_per_slab); 6306 6307 static ssize_t order_show(struct kmem_cache *s, char *buf) 6308 { 6309 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 6310 } 6311 SLAB_ATTR_RO(order); 6312 6313 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 6314 { 6315 return sysfs_emit(buf, "%lu\n", s->min_partial); 6316 } 6317 6318 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 6319 size_t length) 6320 { 6321 unsigned long min; 6322 int err; 6323 6324 err = kstrtoul(buf, 10, &min); 6325 if (err) 6326 return err; 6327 6328 s->min_partial = min; 6329 return length; 6330 } 6331 SLAB_ATTR(min_partial); 6332 6333 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 6334 { 6335 unsigned int nr_partial = 0; 6336 #ifdef CONFIG_SLUB_CPU_PARTIAL 6337 nr_partial = s->cpu_partial; 6338 #endif 6339 6340 return sysfs_emit(buf, "%u\n", nr_partial); 6341 } 6342 6343 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 6344 size_t length) 6345 { 6346 unsigned int objects; 6347 int err; 6348 6349 err = kstrtouint(buf, 10, &objects); 6350 if (err) 6351 return err; 6352 if (objects && !kmem_cache_has_cpu_partial(s)) 6353 return -EINVAL; 6354 6355 slub_set_cpu_partial(s, objects); 
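	/*
	 * Flush the existing cpu slabs and percpu partial lists so that
	 * subsequent refills observe the new cpu_partial limit set above.
	 */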
6356 flush_all(s); 6357 return length; 6358 } 6359 SLAB_ATTR(cpu_partial); 6360 6361 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 6362 { 6363 if (!s->ctor) 6364 return 0; 6365 return sysfs_emit(buf, "%pS\n", s->ctor); 6366 } 6367 SLAB_ATTR_RO(ctor); 6368 6369 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 6370 { 6371 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 6372 } 6373 SLAB_ATTR_RO(aliases); 6374 6375 static ssize_t partial_show(struct kmem_cache *s, char *buf) 6376 { 6377 return show_slab_objects(s, buf, SO_PARTIAL); 6378 } 6379 SLAB_ATTR_RO(partial); 6380 6381 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 6382 { 6383 return show_slab_objects(s, buf, SO_CPU); 6384 } 6385 SLAB_ATTR_RO(cpu_slabs); 6386 6387 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 6388 { 6389 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 6390 } 6391 SLAB_ATTR_RO(objects_partial); 6392 6393 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 6394 { 6395 int objects = 0; 6396 int slabs = 0; 6397 int cpu __maybe_unused; 6398 int len = 0; 6399 6400 #ifdef CONFIG_SLUB_CPU_PARTIAL 6401 for_each_online_cpu(cpu) { 6402 struct slab *slab; 6403 6404 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6405 6406 if (slab) 6407 slabs += data_race(slab->slabs); 6408 } 6409 #endif 6410 6411 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 6412 objects = (slabs * oo_objects(s->oo)) / 2; 6413 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 6414 6415 #ifdef CONFIG_SLUB_CPU_PARTIAL 6416 for_each_online_cpu(cpu) { 6417 struct slab *slab; 6418 6419 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6420 if (slab) { 6421 slabs = data_race(slab->slabs); 6422 objects = (slabs * oo_objects(s->oo)) / 2; 6423 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 6424 cpu, objects, slabs); 6425 } 6426 } 6427 #endif 6428 len += sysfs_emit_at(buf, len, "\n"); 6429 6430 return len; 6431 } 6432 SLAB_ATTR_RO(slabs_cpu_partial); 6433 6434 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 6435 { 6436 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 6437 } 6438 SLAB_ATTR_RO(reclaim_account); 6439 6440 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 6441 { 6442 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 6443 } 6444 SLAB_ATTR_RO(hwcache_align); 6445 6446 #ifdef CONFIG_ZONE_DMA 6447 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 6448 { 6449 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 6450 } 6451 SLAB_ATTR_RO(cache_dma); 6452 #endif 6453 6454 #ifdef CONFIG_HARDENED_USERCOPY 6455 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 6456 { 6457 return sysfs_emit(buf, "%u\n", s->usersize); 6458 } 6459 SLAB_ATTR_RO(usersize); 6460 #endif 6461 6462 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 6463 { 6464 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 6465 } 6466 SLAB_ATTR_RO(destroy_by_rcu); 6467 6468 #ifdef CONFIG_SLUB_DEBUG 6469 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 6470 { 6471 return show_slab_objects(s, buf, SO_ALL); 6472 } 6473 SLAB_ATTR_RO(slabs); 6474 6475 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 6476 { 6477 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 6478 } 6479 SLAB_ATTR_RO(total_objects); 6480 6481 static ssize_t objects_show(struct kmem_cache *s, char *buf) 6482 { 6483 return 
show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 6484 } 6485 SLAB_ATTR_RO(objects); 6486 6487 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 6488 { 6489 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 6490 } 6491 SLAB_ATTR_RO(sanity_checks); 6492 6493 static ssize_t trace_show(struct kmem_cache *s, char *buf) 6494 { 6495 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 6496 } 6497 SLAB_ATTR_RO(trace); 6498 6499 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 6500 { 6501 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 6502 } 6503 6504 SLAB_ATTR_RO(red_zone); 6505 6506 static ssize_t poison_show(struct kmem_cache *s, char *buf) 6507 { 6508 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 6509 } 6510 6511 SLAB_ATTR_RO(poison); 6512 6513 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 6514 { 6515 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 6516 } 6517 6518 SLAB_ATTR_RO(store_user); 6519 6520 static ssize_t validate_show(struct kmem_cache *s, char *buf) 6521 { 6522 return 0; 6523 } 6524 6525 static ssize_t validate_store(struct kmem_cache *s, 6526 const char *buf, size_t length) 6527 { 6528 int ret = -EINVAL; 6529 6530 if (buf[0] == '1' && kmem_cache_debug(s)) { 6531 ret = validate_slab_cache(s); 6532 if (ret >= 0) 6533 ret = length; 6534 } 6535 return ret; 6536 } 6537 SLAB_ATTR(validate); 6538 6539 #endif /* CONFIG_SLUB_DEBUG */ 6540 6541 #ifdef CONFIG_FAILSLAB 6542 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 6543 { 6544 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 6545 } 6546 6547 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 6548 size_t length) 6549 { 6550 if (s->refcount > 1) 6551 return -EINVAL; 6552 6553 if (buf[0] == '1') 6554 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); 6555 else 6556 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); 6557 6558 return length; 6559 } 6560 SLAB_ATTR(failslab); 6561 #endif 6562 6563 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 6564 { 6565 return 0; 6566 } 6567 6568 static ssize_t shrink_store(struct kmem_cache *s, 6569 const char *buf, size_t length) 6570 { 6571 if (buf[0] == '1') 6572 kmem_cache_shrink(s); 6573 else 6574 return -EINVAL; 6575 return length; 6576 } 6577 SLAB_ATTR(shrink); 6578 6579 #ifdef CONFIG_NUMA 6580 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 6581 { 6582 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 6583 } 6584 6585 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 6586 const char *buf, size_t length) 6587 { 6588 unsigned int ratio; 6589 int err; 6590 6591 err = kstrtouint(buf, 10, &ratio); 6592 if (err) 6593 return err; 6594 if (ratio > 100) 6595 return -ERANGE; 6596 6597 s->remote_node_defrag_ratio = ratio * 10; 6598 6599 return length; 6600 } 6601 SLAB_ATTR(remote_node_defrag_ratio); 6602 #endif 6603 6604 #ifdef CONFIG_SLUB_STATS 6605 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 6606 { 6607 unsigned long sum = 0; 6608 int cpu; 6609 int len = 0; 6610 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 6611 6612 if (!data) 6613 return -ENOMEM; 6614 6615 for_each_online_cpu(cpu) { 6616 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 6617 6618 data[cpu] = x; 6619 sum += x; 6620 } 6621 6622 len += sysfs_emit_at(buf, len, "%lu", sum); 6623 6624 #ifdef CONFIG_SMP 6625 for_each_online_cpu(cpu) { 6626 if (data[cpu]) 6627 len += 
sysfs_emit_at(buf, len, " C%d=%u", 6628 cpu, data[cpu]); 6629 } 6630 #endif 6631 kfree(data); 6632 len += sysfs_emit_at(buf, len, "\n"); 6633 6634 return len; 6635 } 6636 6637 static void clear_stat(struct kmem_cache *s, enum stat_item si) 6638 { 6639 int cpu; 6640 6641 for_each_online_cpu(cpu) 6642 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 6643 } 6644 6645 #define STAT_ATTR(si, text) \ 6646 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 6647 { \ 6648 return show_stat(s, buf, si); \ 6649 } \ 6650 static ssize_t text##_store(struct kmem_cache *s, \ 6651 const char *buf, size_t length) \ 6652 { \ 6653 if (buf[0] != '0') \ 6654 return -EINVAL; \ 6655 clear_stat(s, si); \ 6656 return length; \ 6657 } \ 6658 SLAB_ATTR(text); \ 6659 6660 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 6661 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 6662 STAT_ATTR(FREE_FASTPATH, free_fastpath); 6663 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 6664 STAT_ATTR(FREE_FROZEN, free_frozen); 6665 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 6666 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 6667 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 6668 STAT_ATTR(ALLOC_SLAB, alloc_slab); 6669 STAT_ATTR(ALLOC_REFILL, alloc_refill); 6670 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 6671 STAT_ATTR(FREE_SLAB, free_slab); 6672 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 6673 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 6674 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 6675 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 6676 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 6677 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 6678 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 6679 STAT_ATTR(ORDER_FALLBACK, order_fallback); 6680 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 6681 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 6682 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 6683 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 6684 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 6685 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 6686 #endif /* CONFIG_SLUB_STATS */ 6687 6688 #ifdef CONFIG_KFENCE 6689 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) 6690 { 6691 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); 6692 } 6693 6694 static ssize_t skip_kfence_store(struct kmem_cache *s, 6695 const char *buf, size_t length) 6696 { 6697 int ret = length; 6698 6699 if (buf[0] == '0') 6700 s->flags &= ~SLAB_SKIP_KFENCE; 6701 else if (buf[0] == '1') 6702 s->flags |= SLAB_SKIP_KFENCE; 6703 else 6704 ret = -EINVAL; 6705 6706 return ret; 6707 } 6708 SLAB_ATTR(skip_kfence); 6709 #endif 6710 6711 static struct attribute *slab_attrs[] = { 6712 &slab_size_attr.attr, 6713 &object_size_attr.attr, 6714 &objs_per_slab_attr.attr, 6715 &order_attr.attr, 6716 &min_partial_attr.attr, 6717 &cpu_partial_attr.attr, 6718 &objects_partial_attr.attr, 6719 &partial_attr.attr, 6720 &cpu_slabs_attr.attr, 6721 &ctor_attr.attr, 6722 &aliases_attr.attr, 6723 &align_attr.attr, 6724 &hwcache_align_attr.attr, 6725 &reclaim_account_attr.attr, 6726 &destroy_by_rcu_attr.attr, 6727 &shrink_attr.attr, 6728 &slabs_cpu_partial_attr.attr, 6729 #ifdef CONFIG_SLUB_DEBUG 6730 &total_objects_attr.attr, 6731 &objects_attr.attr, 6732 &slabs_attr.attr, 6733 &sanity_checks_attr.attr, 6734 &trace_attr.attr, 6735 &red_zone_attr.attr, 6736 &poison_attr.attr, 6737 &store_user_attr.attr, 6738 &validate_attr.attr, 6739 #endif 6740 #ifdef CONFIG_ZONE_DMA 6741 &cache_dma_attr.attr, 6742 #endif 
6743 #ifdef CONFIG_NUMA 6744 &remote_node_defrag_ratio_attr.attr, 6745 #endif 6746 #ifdef CONFIG_SLUB_STATS 6747 &alloc_fastpath_attr.attr, 6748 &alloc_slowpath_attr.attr, 6749 &free_fastpath_attr.attr, 6750 &free_slowpath_attr.attr, 6751 &free_frozen_attr.attr, 6752 &free_add_partial_attr.attr, 6753 &free_remove_partial_attr.attr, 6754 &alloc_from_partial_attr.attr, 6755 &alloc_slab_attr.attr, 6756 &alloc_refill_attr.attr, 6757 &alloc_node_mismatch_attr.attr, 6758 &free_slab_attr.attr, 6759 &cpuslab_flush_attr.attr, 6760 &deactivate_full_attr.attr, 6761 &deactivate_empty_attr.attr, 6762 &deactivate_to_head_attr.attr, 6763 &deactivate_to_tail_attr.attr, 6764 &deactivate_remote_frees_attr.attr, 6765 &deactivate_bypass_attr.attr, 6766 &order_fallback_attr.attr, 6767 &cmpxchg_double_fail_attr.attr, 6768 &cmpxchg_double_cpu_fail_attr.attr, 6769 &cpu_partial_alloc_attr.attr, 6770 &cpu_partial_free_attr.attr, 6771 &cpu_partial_node_attr.attr, 6772 &cpu_partial_drain_attr.attr, 6773 #endif 6774 #ifdef CONFIG_FAILSLAB 6775 &failslab_attr.attr, 6776 #endif 6777 #ifdef CONFIG_HARDENED_USERCOPY 6778 &usersize_attr.attr, 6779 #endif 6780 #ifdef CONFIG_KFENCE 6781 &skip_kfence_attr.attr, 6782 #endif 6783 6784 NULL 6785 }; 6786 6787 static const struct attribute_group slab_attr_group = { 6788 .attrs = slab_attrs, 6789 }; 6790 6791 static ssize_t slab_attr_show(struct kobject *kobj, 6792 struct attribute *attr, 6793 char *buf) 6794 { 6795 struct slab_attribute *attribute; 6796 struct kmem_cache *s; 6797 6798 attribute = to_slab_attr(attr); 6799 s = to_slab(kobj); 6800 6801 if (!attribute->show) 6802 return -EIO; 6803 6804 return attribute->show(s, buf); 6805 } 6806 6807 static ssize_t slab_attr_store(struct kobject *kobj, 6808 struct attribute *attr, 6809 const char *buf, size_t len) 6810 { 6811 struct slab_attribute *attribute; 6812 struct kmem_cache *s; 6813 6814 attribute = to_slab_attr(attr); 6815 s = to_slab(kobj); 6816 6817 if (!attribute->store) 6818 return -EIO; 6819 6820 return attribute->store(s, buf, len); 6821 } 6822 6823 static void kmem_cache_release(struct kobject *k) 6824 { 6825 slab_kmem_cache_release(to_slab(k)); 6826 } 6827 6828 static const struct sysfs_ops slab_sysfs_ops = { 6829 .show = slab_attr_show, 6830 .store = slab_attr_store, 6831 }; 6832 6833 static const struct kobj_type slab_ktype = { 6834 .sysfs_ops = &slab_sysfs_ops, 6835 .release = kmem_cache_release, 6836 }; 6837 6838 static struct kset *slab_kset; 6839 6840 static inline struct kset *cache_kset(struct kmem_cache *s) 6841 { 6842 return slab_kset; 6843 } 6844 6845 #define ID_STR_LENGTH 32 6846 6847 /* Create a unique string id for a slab cache: 6848 * 6849 * Format :[flags-]size 6850 */ 6851 static char *create_unique_id(struct kmem_cache *s) 6852 { 6853 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 6854 char *p = name; 6855 6856 if (!name) 6857 return ERR_PTR(-ENOMEM); 6858 6859 *p++ = ':'; 6860 /* 6861 * First flags affecting slabcache operations. We will only 6862 * get here for aliasable slabs so we do not need to support 6863 * too many flags. The flags here must cover all flags that 6864 * are matched during merging to guarantee that the id is 6865 * unique. 
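 *
 * For example (values purely illustrative): a mergeable cache with
 * SLAB_ACCOUNT set and s->size == 192 yields ":A-0000192"; if no flag
 * characters are emitted, the '-' separator is skipped as well, giving
 * ":0000192".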
6866 */ 6867 if (s->flags & SLAB_CACHE_DMA) 6868 *p++ = 'd'; 6869 if (s->flags & SLAB_CACHE_DMA32) 6870 *p++ = 'D'; 6871 if (s->flags & SLAB_RECLAIM_ACCOUNT) 6872 *p++ = 'a'; 6873 if (s->flags & SLAB_CONSISTENCY_CHECKS) 6874 *p++ = 'F'; 6875 if (s->flags & SLAB_ACCOUNT) 6876 *p++ = 'A'; 6877 if (p != name + 1) 6878 *p++ = '-'; 6879 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); 6880 6881 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { 6882 kfree(name); 6883 return ERR_PTR(-EINVAL); 6884 } 6885 kmsan_unpoison_memory(name, p - name); 6886 return name; 6887 } 6888 6889 static int sysfs_slab_add(struct kmem_cache *s) 6890 { 6891 int err; 6892 const char *name; 6893 struct kset *kset = cache_kset(s); 6894 int unmergeable = slab_unmergeable(s); 6895 6896 if (!unmergeable && disable_higher_order_debug && 6897 (slub_debug & DEBUG_METADATA_FLAGS)) 6898 unmergeable = 1; 6899 6900 if (unmergeable) { 6901 /* 6902 * Slabcache can never be merged so we can use the name proper. 6903 * This is typically the case for debug situations. In that 6904 * case we can catch duplicate names easily. 6905 */ 6906 sysfs_remove_link(&slab_kset->kobj, s->name); 6907 name = s->name; 6908 } else { 6909 /* 6910 * Create a unique name for the slab as a target 6911 * for the symlinks. 6912 */ 6913 name = create_unique_id(s); 6914 if (IS_ERR(name)) 6915 return PTR_ERR(name); 6916 } 6917 6918 s->kobj.kset = kset; 6919 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 6920 if (err) 6921 goto out; 6922 6923 err = sysfs_create_group(&s->kobj, &slab_attr_group); 6924 if (err) 6925 goto out_del_kobj; 6926 6927 if (!unmergeable) { 6928 /* Setup first alias */ 6929 sysfs_slab_alias(s, s->name); 6930 } 6931 out: 6932 if (!unmergeable) 6933 kfree(name); 6934 return err; 6935 out_del_kobj: 6936 kobject_del(&s->kobj); 6937 goto out; 6938 } 6939 6940 void sysfs_slab_unlink(struct kmem_cache *s) 6941 { 6942 kobject_del(&s->kobj); 6943 } 6944 6945 void sysfs_slab_release(struct kmem_cache *s) 6946 { 6947 kobject_put(&s->kobj); 6948 } 6949 6950 /* 6951 * Need to buffer aliases during bootup until sysfs becomes 6952 * available lest we lose that information. 6953 */ 6954 struct saved_alias { 6955 struct kmem_cache *s; 6956 const char *name; 6957 struct saved_alias *next; 6958 }; 6959 6960 static struct saved_alias *alias_list; 6961 6962 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 6963 { 6964 struct saved_alias *al; 6965 6966 if (slab_state == FULL) { 6967 /* 6968 * If we have a leftover link then remove it. 
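 * Such a leftover link can remain from an earlier cache of the same
 * alias name that has since been destroyed; removing it first keeps
 * sysfs_create_link() below from failing with -EEXIST.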
6969 */ 6970 sysfs_remove_link(&slab_kset->kobj, name); 6971 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 6972 } 6973 6974 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 6975 if (!al) 6976 return -ENOMEM; 6977 6978 al->s = s; 6979 al->name = name; 6980 al->next = alias_list; 6981 alias_list = al; 6982 kmsan_unpoison_memory(al, sizeof(*al)); 6983 return 0; 6984 } 6985 6986 static int __init slab_sysfs_init(void) 6987 { 6988 struct kmem_cache *s; 6989 int err; 6990 6991 mutex_lock(&slab_mutex); 6992 6993 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 6994 if (!slab_kset) { 6995 mutex_unlock(&slab_mutex); 6996 pr_err("Cannot register slab subsystem.\n"); 6997 return -ENOMEM; 6998 } 6999 7000 slab_state = FULL; 7001 7002 list_for_each_entry(s, &slab_caches, list) { 7003 err = sysfs_slab_add(s); 7004 if (err) 7005 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 7006 s->name); 7007 } 7008 7009 while (alias_list) { 7010 struct saved_alias *al = alias_list; 7011 7012 alias_list = alias_list->next; 7013 err = sysfs_slab_alias(al->s, al->name); 7014 if (err) 7015 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 7016 al->name); 7017 kfree(al); 7018 } 7019 7020 mutex_unlock(&slab_mutex); 7021 return 0; 7022 } 7023 late_initcall(slab_sysfs_init); 7024 #endif /* SLAB_SUPPORTS_SYSFS */ 7025 7026 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 7027 static int slab_debugfs_show(struct seq_file *seq, void *v) 7028 { 7029 struct loc_track *t = seq->private; 7030 struct location *l; 7031 unsigned long idx; 7032 7033 idx = (unsigned long) t->idx; 7034 if (idx < t->count) { 7035 l = &t->loc[idx]; 7036 7037 seq_printf(seq, "%7ld ", l->count); 7038 7039 if (l->addr) 7040 seq_printf(seq, "%pS", (void *)l->addr); 7041 else 7042 seq_puts(seq, "<not-available>"); 7043 7044 if (l->waste) 7045 seq_printf(seq, " waste=%lu/%lu", 7046 l->count * l->waste, l->waste); 7047 7048 if (l->sum_time != l->min_time) { 7049 seq_printf(seq, " age=%ld/%llu/%ld", 7050 l->min_time, div_u64(l->sum_time, l->count), 7051 l->max_time); 7052 } else 7053 seq_printf(seq, " age=%ld", l->min_time); 7054 7055 if (l->min_pid != l->max_pid) 7056 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 7057 else 7058 seq_printf(seq, " pid=%ld", 7059 l->min_pid); 7060 7061 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 7062 seq_printf(seq, " cpus=%*pbl", 7063 cpumask_pr_args(to_cpumask(l->cpus))); 7064 7065 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 7066 seq_printf(seq, " nodes=%*pbl", 7067 nodemask_pr_args(&l->nodes)); 7068 7069 #ifdef CONFIG_STACKDEPOT 7070 { 7071 depot_stack_handle_t handle; 7072 unsigned long *entries; 7073 unsigned int nr_entries, j; 7074 7075 handle = READ_ONCE(l->handle); 7076 if (handle) { 7077 nr_entries = stack_depot_fetch(handle, &entries); 7078 seq_puts(seq, "\n"); 7079 for (j = 0; j < nr_entries; j++) 7080 seq_printf(seq, " %pS\n", (void *)entries[j]); 7081 } 7082 } 7083 #endif 7084 seq_puts(seq, "\n"); 7085 } 7086 7087 if (!idx && !t->count) 7088 seq_puts(seq, "No data\n"); 7089 7090 return 0; 7091 } 7092 7093 static void slab_debugfs_stop(struct seq_file *seq, void *v) 7094 { 7095 } 7096 7097 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 7098 { 7099 struct loc_track *t = seq->private; 7100 7101 t->idx = ++(*ppos); 7102 if (*ppos <= t->count) 7103 return ppos; 7104 7105 return NULL; 7106 } 7107 7108 static int cmp_loc_by_count(const void *a, const void *b, const void *data) 7109 { 7110 struct location 
*loc1 = (struct location *)a; 7111 struct location *loc2 = (struct location *)b; 7112 7113 if (loc1->count > loc2->count) 7114 return -1; 7115 else 7116 return 1; 7117 } 7118 7119 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 7120 { 7121 struct loc_track *t = seq->private; 7122 7123 t->idx = *ppos; 7124 return ppos; 7125 } 7126 7127 static const struct seq_operations slab_debugfs_sops = { 7128 .start = slab_debugfs_start, 7129 .next = slab_debugfs_next, 7130 .stop = slab_debugfs_stop, 7131 .show = slab_debugfs_show, 7132 }; 7133 7134 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 7135 { 7136 7137 struct kmem_cache_node *n; 7138 enum track_item alloc; 7139 int node; 7140 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 7141 sizeof(struct loc_track)); 7142 struct kmem_cache *s = file_inode(filep)->i_private; 7143 unsigned long *obj_map; 7144 7145 if (!t) 7146 return -ENOMEM; 7147 7148 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 7149 if (!obj_map) { 7150 seq_release_private(inode, filep); 7151 return -ENOMEM; 7152 } 7153 7154 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 7155 alloc = TRACK_ALLOC; 7156 else 7157 alloc = TRACK_FREE; 7158 7159 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 7160 bitmap_free(obj_map); 7161 seq_release_private(inode, filep); 7162 return -ENOMEM; 7163 } 7164 7165 for_each_kmem_cache_node(s, node, n) { 7166 unsigned long flags; 7167 struct slab *slab; 7168 7169 if (!node_nr_slabs(n)) 7170 continue; 7171 7172 spin_lock_irqsave(&n->list_lock, flags); 7173 list_for_each_entry(slab, &n->partial, slab_list) 7174 process_slab(t, s, slab, alloc, obj_map); 7175 list_for_each_entry(slab, &n->full, slab_list) 7176 process_slab(t, s, slab, alloc, obj_map); 7177 spin_unlock_irqrestore(&n->list_lock, flags); 7178 } 7179 7180 /* Sort locations by count */ 7181 sort_r(t->loc, t->count, sizeof(struct location), 7182 cmp_loc_by_count, NULL, NULL); 7183 7184 bitmap_free(obj_map); 7185 return 0; 7186 } 7187 7188 static int slab_debug_trace_release(struct inode *inode, struct file *file) 7189 { 7190 struct seq_file *seq = file->private_data; 7191 struct loc_track *t = seq->private; 7192 7193 free_loc_track(t); 7194 return seq_release_private(inode, file); 7195 } 7196 7197 static const struct file_operations slab_debugfs_fops = { 7198 .open = slab_debug_trace_open, 7199 .read = seq_read, 7200 .llseek = seq_lseek, 7201 .release = slab_debug_trace_release, 7202 }; 7203 7204 static void debugfs_slab_add(struct kmem_cache *s) 7205 { 7206 struct dentry *slab_cache_dir; 7207 7208 if (unlikely(!slab_debugfs_root)) 7209 return; 7210 7211 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 7212 7213 debugfs_create_file("alloc_traces", 0400, 7214 slab_cache_dir, s, &slab_debugfs_fops); 7215 7216 debugfs_create_file("free_traces", 0400, 7217 slab_cache_dir, s, &slab_debugfs_fops); 7218 } 7219 7220 void debugfs_slab_release(struct kmem_cache *s) 7221 { 7222 debugfs_lookup_and_remove(s->name, slab_debugfs_root); 7223 } 7224 7225 static int __init slab_debugfs_init(void) 7226 { 7227 struct kmem_cache *s; 7228 7229 slab_debugfs_root = debugfs_create_dir("slab", NULL); 7230 7231 list_for_each_entry(s, &slab_caches, list) 7232 if (s->flags & SLAB_STORE_USER) 7233 debugfs_slab_add(s); 7234 7235 return 0; 7236 7237 } 7238 __initcall(slab_debugfs_init); 7239 #endif 7240 /* 7241 * The /proc/slabinfo ABI 7242 */ 7243 #ifdef CONFIG_SLUB_DEBUG 7244 void 
get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 7245 { 7246 unsigned long nr_slabs = 0; 7247 unsigned long nr_objs = 0; 7248 unsigned long nr_free = 0; 7249 int node; 7250 struct kmem_cache_node *n; 7251 7252 for_each_kmem_cache_node(s, node, n) { 7253 nr_slabs += node_nr_slabs(n); 7254 nr_objs += node_nr_objs(n); 7255 nr_free += count_partial_free_approx(n); 7256 } 7257 7258 sinfo->active_objs = nr_objs - nr_free; 7259 sinfo->num_objs = nr_objs; 7260 sinfo->active_slabs = nr_slabs; 7261 sinfo->num_slabs = nr_slabs; 7262 sinfo->objects_per_slab = oo_objects(s->oo); 7263 sinfo->cache_order = oo_order(s->oo); 7264 } 7265 #endif /* CONFIG_SLUB_DEBUG */ 7266
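
/*
 * Worked example for get_slabinfo() above, with purely illustrative
 * numbers: if a cache's nodes together hold nr_slabs = 10 slabs of
 * 32 objects each (nr_objs = 320), and count_partial_free_approx()
 * reports nr_free = 25 free objects on partial slabs, then:
 *
 *	sinfo->num_objs    = 320
 *	sinfo->active_objs = 320 - 25 = 295
 *
 * Because nr_free is an approximation, the active_objs value shown in
 * /proc/slabinfo is approximate as well.
 */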