1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * SLUB: A slab allocator that limits cache line use instead of queuing 4 * objects in per cpu and per node lists. 5 * 6 * The allocator synchronizes using per slab locks or atomic operations 7 * and only uses a centralized lock to manage a pool of partial slabs. 8 * 9 * (C) 2007 SGI, Christoph Lameter 10 * (C) 2011 Linux Foundation, Christoph Lameter 11 */ 12 13 #include <linux/mm.h> 14 #include <linux/swap.h> /* mm_account_reclaimed_pages() */ 15 #include <linux/module.h> 16 #include <linux/bit_spinlock.h> 17 #include <linux/interrupt.h> 18 #include <linux/swab.h> 19 #include <linux/bitops.h> 20 #include <linux/slab.h> 21 #include "slab.h" 22 #include <linux/proc_fs.h> 23 #include <linux/seq_file.h> 24 #include <linux/kasan.h> 25 #include <linux/kmsan.h> 26 #include <linux/cpu.h> 27 #include <linux/cpuset.h> 28 #include <linux/mempolicy.h> 29 #include <linux/ctype.h> 30 #include <linux/stackdepot.h> 31 #include <linux/debugobjects.h> 32 #include <linux/kallsyms.h> 33 #include <linux/kfence.h> 34 #include <linux/memory.h> 35 #include <linux/math64.h> 36 #include <linux/fault-inject.h> 37 #include <linux/kmemleak.h> 38 #include <linux/stacktrace.h> 39 #include <linux/prefetch.h> 40 #include <linux/memcontrol.h> 41 #include <linux/random.h> 42 #include <kunit/test.h> 43 #include <kunit/test-bug.h> 44 #include <linux/sort.h> 45 46 #include <linux/debugfs.h> 47 #include <trace/events/kmem.h> 48 49 #include "internal.h" 50 51 /* 52 * Lock order: 53 * 1. slab_mutex (Global Mutex) 54 * 2. node->list_lock (Spinlock) 55 * 3. kmem_cache->cpu_slab->lock (Local lock) 56 * 4. slab_lock(slab) (Only on some arches) 57 * 5. object_map_lock (Only for debugging) 58 * 59 * slab_mutex 60 * 61 * The role of the slab_mutex is to protect the list of all the slabs 62 * and to synchronize major metadata changes to slab cache structures. 63 * Also synchronizes memory hotplug callbacks. 64 * 65 * slab_lock 66 * 67 * The slab_lock is a wrapper around the page lock, thus it is a bit 68 * spinlock. 69 * 70 * The slab_lock is only used on arches that do not have the ability 71 * to do a cmpxchg_double. It only protects: 72 * 73 * A. slab->freelist -> List of free objects in a slab 74 * B. slab->inuse -> Number of objects in use 75 * C. slab->objects -> Number of objects in slab 76 * D. slab->frozen -> frozen state 77 * 78 * Frozen slabs 79 * 80 * If a slab is frozen then it is exempt from list management. It is 81 * the cpu slab which is actively allocated from by the processor that 82 * froze it and it is not on any list. The processor that froze the 83 * slab is the one who can perform list operations on the slab. Other 84 * processors may put objects onto the freelist but the processor that 85 * froze the slab is the only one that can retrieve the objects from the 86 * slab's freelist. 87 * 88 * CPU partial slabs 89 * 90 * The partially empty slabs cached on the CPU partial list are used 91 * for performance reasons, which speeds up the allocation process. 92 * These slabs are not frozen, but are also exempt from list management, 93 * by clearing the PG_workingset flag when moving out of the node 94 * partial list. Please see __slab_free() for more details. 
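 *
 * As an editor's illustration only (not allocator code; the state names are
 * invented here), the states summarized below can roughly be told apart from
 * the frozen bit and the workingset folio flag:
 *
 *	if (slab->frozen)
 *		state = CPU_SLAB;
 *	else if (folio_test_workingset(slab_folio(slab)))
 *		state = NODE_PARTIAL;
 *	else
 *		state = CPU_PARTIAL_OR_FULL;
 *
 * The flags alone cannot distinguish a cpu partial slab from a full slab;
 * that distinction comes from where the slab is referenced from.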
 *
 * To sum up, the current scheme is:
 * - node partial slab: PG_Workingset && !frozen
 * - cpu partial slab: !PG_Workingset && !frozen
 * - cpu slab: !PG_Workingset && frozen
 * - full slab: !PG_Workingset && !frozen
 *
 * list_lock
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor may the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * For debug caches, all allocations are forced to go through a list_lock
 * protected region to serialize against concurrent validation.
 *
 * cpu_slab->lock local lock
 *
 * This lock protects slowpath manipulation of all kmem_cache_cpu fields
 * except the stat counters. This is a percpu structure manipulated only by
 * the local cpu, so the lock protects against being preempted or interrupted
 * by an irq. Fast path operations rely on lockless operations instead.
 *
 * On PREEMPT_RT, the local lock neither disables interrupts nor preemption,
 * which means the lockless fastpath cannot be used as it might interfere with
 * an in-progress slow path operation. In this case the local lock is always
 * taken but it still utilizes the freelist for the common operations.
 *
 * lockless fastpaths
 *
 * The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 * are fully lockless when satisfied from the percpu slab (and when
 * cmpxchg_double is possible to use, otherwise slab_lock is taken).
 * They also don't disable preemption or migration or irqs. They rely on
 * the transaction id (tid) field to detect being preempted or moved to
 * another cpu.
 *
 * irq, preemption, migration considerations
 *
 * Interrupts are disabled as part of list_lock or local_lock operations, or
 * around the slab_lock operation, in order to make the slab allocator safe
 * to use in the context of an irq.
 *
 * In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 * allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 * local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
 * doesn't have to be revalidated in each section protected by the local lock.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * slab->frozen		The slab is frozen and exempt from list processing.
165 * This means that the slab is dedicated to a purpose 166 * such as satisfying allocations for a specific 167 * processor. Objects may be freed in the slab while 168 * it is frozen but slab_free will then skip the usual 169 * list operations. It is up to the processor holding 170 * the slab to integrate the slab into the slab lists 171 * when the slab is no longer needed. 172 * 173 * One use of this flag is to mark slabs that are 174 * used for allocations. Then such a slab becomes a cpu 175 * slab. The cpu slab may be equipped with an additional 176 * freelist that allows lockless access to 177 * free objects in addition to the regular freelist 178 * that requires the slab lock. 179 * 180 * SLAB_DEBUG_FLAGS Slab requires special handling due to debug 181 * options set. This moves slab handling out of 182 * the fast path and disables lockless freelists. 183 */ 184 185 /* 186 * We could simply use migrate_disable()/enable() but as long as it's a 187 * function call even on !PREEMPT_RT, use inline preempt_disable() there. 188 */ 189 #ifndef CONFIG_PREEMPT_RT 190 #define slub_get_cpu_ptr(var) get_cpu_ptr(var) 191 #define slub_put_cpu_ptr(var) put_cpu_ptr(var) 192 #define USE_LOCKLESS_FAST_PATH() (true) 193 #else 194 #define slub_get_cpu_ptr(var) \ 195 ({ \ 196 migrate_disable(); \ 197 this_cpu_ptr(var); \ 198 }) 199 #define slub_put_cpu_ptr(var) \ 200 do { \ 201 (void)(var); \ 202 migrate_enable(); \ 203 } while (0) 204 #define USE_LOCKLESS_FAST_PATH() (false) 205 #endif 206 207 #ifndef CONFIG_SLUB_TINY 208 #define __fastpath_inline __always_inline 209 #else 210 #define __fastpath_inline 211 #endif 212 213 #ifdef CONFIG_SLUB_DEBUG 214 #ifdef CONFIG_SLUB_DEBUG_ON 215 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled); 216 #else 217 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled); 218 #endif 219 #endif /* CONFIG_SLUB_DEBUG */ 220 221 /* Structure holding parameters for get_partial() call chain */ 222 struct partial_context { 223 gfp_t flags; 224 unsigned int orig_size; 225 void *object; 226 }; 227 228 static inline bool kmem_cache_debug(struct kmem_cache *s) 229 { 230 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); 231 } 232 233 static inline bool slub_debug_orig_size(struct kmem_cache *s) 234 { 235 return (kmem_cache_debug_flags(s, SLAB_STORE_USER) && 236 (s->flags & SLAB_KMALLOC)); 237 } 238 239 void *fixup_red_left(struct kmem_cache *s, void *p) 240 { 241 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) 242 p += s->red_left_pad; 243 244 return p; 245 } 246 247 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) 248 { 249 #ifdef CONFIG_SLUB_CPU_PARTIAL 250 return !kmem_cache_debug(s); 251 #else 252 return false; 253 #endif 254 } 255 256 /* 257 * Issues still to be resolved: 258 * 259 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 260 * 261 * - Variable sizing of the per node arrays 262 */ 263 264 /* Enable to log cmpxchg failures */ 265 #undef SLUB_DEBUG_CMPXCHG 266 267 #ifndef CONFIG_SLUB_TINY 268 /* 269 * Minimum number of partial slabs. These will be left on the partial 270 * lists even if they are empty. kmem_cache_shrink may reclaim them. 271 */ 272 #define MIN_PARTIAL 5 273 274 /* 275 * Maximum number of desirable partial slabs. 276 * The existence of more partial slabs makes kmem_cache_shrink 277 * sort the partial list by the number of objects in use. 
278 */ 279 #define MAX_PARTIAL 10 280 #else 281 #define MIN_PARTIAL 0 282 #define MAX_PARTIAL 0 283 #endif 284 285 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \ 286 SLAB_POISON | SLAB_STORE_USER) 287 288 /* 289 * These debug flags cannot use CMPXCHG because there might be consistency 290 * issues when checking or reading debug information 291 */ 292 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \ 293 SLAB_TRACE) 294 295 296 /* 297 * Debugging flags that require metadata to be stored in the slab. These get 298 * disabled when slab_debug=O is used and a cache's min order increases with 299 * metadata. 300 */ 301 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 302 303 #define OO_SHIFT 16 304 #define OO_MASK ((1 << OO_SHIFT) - 1) 305 #define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */ 306 307 /* Internal SLUB flags */ 308 /* Poison object */ 309 #define __OBJECT_POISON __SLAB_FLAG_BIT(_SLAB_OBJECT_POISON) 310 /* Use cmpxchg_double */ 311 312 #ifdef system_has_freelist_aba 313 #define __CMPXCHG_DOUBLE __SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE) 314 #else 315 #define __CMPXCHG_DOUBLE __SLAB_FLAG_UNUSED 316 #endif 317 318 /* 319 * Tracking user of a slab. 320 */ 321 #define TRACK_ADDRS_COUNT 16 322 struct track { 323 unsigned long addr; /* Called from address */ 324 #ifdef CONFIG_STACKDEPOT 325 depot_stack_handle_t handle; 326 #endif 327 int cpu; /* Was running on cpu */ 328 int pid; /* Pid context */ 329 unsigned long when; /* When did the operation occur */ 330 }; 331 332 enum track_item { TRACK_ALLOC, TRACK_FREE }; 333 334 #ifdef SLAB_SUPPORTS_SYSFS 335 static int sysfs_slab_add(struct kmem_cache *); 336 static int sysfs_slab_alias(struct kmem_cache *, const char *); 337 #else 338 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 339 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 340 { return 0; } 341 #endif 342 343 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG) 344 static void debugfs_slab_add(struct kmem_cache *); 345 #else 346 static inline void debugfs_slab_add(struct kmem_cache *s) { } 347 #endif 348 349 enum stat_item { 350 ALLOC_FASTPATH, /* Allocation from cpu slab */ 351 ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ 352 FREE_FASTPATH, /* Free to cpu slab */ 353 FREE_SLOWPATH, /* Freeing not to cpu slab */ 354 FREE_FROZEN, /* Freeing to frozen slab */ 355 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ 356 FREE_REMOVE_PARTIAL, /* Freeing removes last object */ 357 ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */ 358 ALLOC_SLAB, /* Cpu slab acquired from page allocator */ 359 ALLOC_REFILL, /* Refill cpu slab from slab freelist */ 360 ALLOC_NODE_MISMATCH, /* Switching cpu slab */ 361 FREE_SLAB, /* Slab freed to the page allocator */ 362 CPUSLAB_FLUSH, /* Abandoning of the cpu slab */ 363 DEACTIVATE_FULL, /* Cpu slab was full when deactivated */ 364 DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */ 365 DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */ 366 DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */ 367 DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */ 368 DEACTIVATE_BYPASS, /* Implicit deactivation */ 369 ORDER_FALLBACK, /* Number of times fallback was necessary */ 370 CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */ 371 CMPXCHG_DOUBLE_FAIL, /* Failures of slab freelist update */ 372 CPU_PARTIAL_ALLOC, /* Used cpu partial 
on alloc */ 373 CPU_PARTIAL_FREE, /* Refill cpu partial on free */ 374 CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */ 375 CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ 376 NR_SLUB_STAT_ITEMS 377 }; 378 379 #ifndef CONFIG_SLUB_TINY 380 /* 381 * When changing the layout, make sure freelist and tid are still compatible 382 * with this_cpu_cmpxchg_double() alignment requirements. 383 */ 384 struct kmem_cache_cpu { 385 union { 386 struct { 387 void **freelist; /* Pointer to next available object */ 388 unsigned long tid; /* Globally unique transaction id */ 389 }; 390 freelist_aba_t freelist_tid; 391 }; 392 struct slab *slab; /* The slab from which we are allocating */ 393 #ifdef CONFIG_SLUB_CPU_PARTIAL 394 struct slab *partial; /* Partially allocated slabs */ 395 #endif 396 local_lock_t lock; /* Protects the fields above */ 397 #ifdef CONFIG_SLUB_STATS 398 unsigned int stat[NR_SLUB_STAT_ITEMS]; 399 #endif 400 }; 401 #endif /* CONFIG_SLUB_TINY */ 402 403 static inline void stat(const struct kmem_cache *s, enum stat_item si) 404 { 405 #ifdef CONFIG_SLUB_STATS 406 /* 407 * The rmw is racy on a preemptible kernel but this is acceptable, so 408 * avoid this_cpu_add()'s irq-disable overhead. 409 */ 410 raw_cpu_inc(s->cpu_slab->stat[si]); 411 #endif 412 } 413 414 static inline 415 void stat_add(const struct kmem_cache *s, enum stat_item si, int v) 416 { 417 #ifdef CONFIG_SLUB_STATS 418 raw_cpu_add(s->cpu_slab->stat[si], v); 419 #endif 420 } 421 422 /* 423 * The slab lists for all objects. 424 */ 425 struct kmem_cache_node { 426 spinlock_t list_lock; 427 unsigned long nr_partial; 428 struct list_head partial; 429 #ifdef CONFIG_SLUB_DEBUG 430 atomic_long_t nr_slabs; 431 atomic_long_t total_objects; 432 struct list_head full; 433 #endif 434 }; 435 436 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 437 { 438 return s->node[node]; 439 } 440 441 /* 442 * Iterator over all nodes. The body will be executed for each node that has 443 * a kmem_cache_node structure allocated (which is true for all online nodes) 444 */ 445 #define for_each_kmem_cache_node(__s, __node, __n) \ 446 for (__node = 0; __node < nr_node_ids; __node++) \ 447 if ((__n = get_node(__s, __node))) 448 449 /* 450 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated. 451 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily 452 * differ during memory hotplug/hotremove operations. 453 * Protected by slab_mutex. 454 */ 455 static nodemask_t slab_nodes; 456 457 #ifndef CONFIG_SLUB_TINY 458 /* 459 * Workqueue used for flush_cpu_slab(). 460 */ 461 static struct workqueue_struct *flushwq; 462 #endif 463 464 /******************************************************************** 465 * Core slab cache functions 466 *******************************************************************/ 467 468 /* 469 * freeptr_t represents a SLUB freelist pointer, which might be encoded 470 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. 471 */ 472 typedef struct { unsigned long v; } freeptr_t; 473 474 /* 475 * Returns freelist pointer (ptr). With hardening, this is obfuscated 476 * with an XOR of the address where the pointer is held and a per-cache 477 * random number. 
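 *
 * For illustration (hardened case; sketch of the code below): decoding
 * applies the same XOR again,
 *
 *	decoded = encoded ^ s->random ^ swab(ptr_addr);
 *
 * so an encode followed by a decode with the same ptr_addr returns the
 * original pointer, while a pointer read from the wrong address (or without
 * knowing s->random) decodes to garbage.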
478 */ 479 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s, 480 void *ptr, unsigned long ptr_addr) 481 { 482 unsigned long encoded; 483 484 #ifdef CONFIG_SLAB_FREELIST_HARDENED 485 encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr); 486 #else 487 encoded = (unsigned long)ptr; 488 #endif 489 return (freeptr_t){.v = encoded}; 490 } 491 492 static inline void *freelist_ptr_decode(const struct kmem_cache *s, 493 freeptr_t ptr, unsigned long ptr_addr) 494 { 495 void *decoded; 496 497 #ifdef CONFIG_SLAB_FREELIST_HARDENED 498 decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr)); 499 #else 500 decoded = (void *)ptr.v; 501 #endif 502 return decoded; 503 } 504 505 static inline void *get_freepointer(struct kmem_cache *s, void *object) 506 { 507 unsigned long ptr_addr; 508 freeptr_t p; 509 510 object = kasan_reset_tag(object); 511 ptr_addr = (unsigned long)object + s->offset; 512 p = *(freeptr_t *)(ptr_addr); 513 return freelist_ptr_decode(s, p, ptr_addr); 514 } 515 516 #ifndef CONFIG_SLUB_TINY 517 static void prefetch_freepointer(const struct kmem_cache *s, void *object) 518 { 519 prefetchw(object + s->offset); 520 } 521 #endif 522 523 /* 524 * When running under KMSAN, get_freepointer_safe() may return an uninitialized 525 * pointer value in the case the current thread loses the race for the next 526 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in 527 * slab_alloc_node() will fail, so the uninitialized value won't be used, but 528 * KMSAN will still check all arguments of cmpxchg because of imperfect 529 * handling of inline assembly. 530 * To work around this problem, we apply __no_kmsan_checks to ensure that 531 * get_freepointer_safe() returns initialized memory. 532 */ 533 __no_kmsan_checks 534 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) 535 { 536 unsigned long freepointer_addr; 537 freeptr_t p; 538 539 if (!debug_pagealloc_enabled_static()) 540 return get_freepointer(s, object); 541 542 object = kasan_reset_tag(object); 543 freepointer_addr = (unsigned long)object + s->offset; 544 copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p)); 545 return freelist_ptr_decode(s, p, freepointer_addr); 546 } 547 548 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 549 { 550 unsigned long freeptr_addr = (unsigned long)object + s->offset; 551 552 #ifdef CONFIG_SLAB_FREELIST_HARDENED 553 BUG_ON(object == fp); /* naive detection of double free or corruption */ 554 #endif 555 556 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr); 557 *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr); 558 } 559 560 /* Loop over all objects in a slab */ 561 #define for_each_object(__p, __s, __addr, __objects) \ 562 for (__p = fixup_red_left(__s, __addr); \ 563 __p < (__addr) + (__objects) * (__s)->size; \ 564 __p += (__s)->size) 565 566 static inline unsigned int order_objects(unsigned int order, unsigned int size) 567 { 568 return ((unsigned int)PAGE_SIZE << order) / size; 569 } 570 571 static inline struct kmem_cache_order_objects oo_make(unsigned int order, 572 unsigned int size) 573 { 574 struct kmem_cache_order_objects x = { 575 (order << OO_SHIFT) + order_objects(order, size) 576 }; 577 578 return x; 579 } 580 581 static inline unsigned int oo_order(struct kmem_cache_order_objects x) 582 { 583 return x.x >> OO_SHIFT; 584 } 585 586 static inline unsigned int oo_objects(struct kmem_cache_order_objects x) 587 { 588 return x.x & OO_MASK; 589 } 590 591 
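/*
 * Worked example for the order/objects encoding above (editor's note,
 * assuming 4 KiB pages; the values are illustrative only):
 *
 *	struct kmem_cache_order_objects oo = oo_make(1, 128);
 *
 *	order_objects(1, 128) == (4096 << 1) / 128 == 64
 *	oo.x                  == (1 << OO_SHIFT) + 64
 *	oo_order(oo)          == 1
 *	oo_objects(oo)        == 64
 */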
#ifdef CONFIG_SLUB_CPU_PARTIAL 592 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) 593 { 594 unsigned int nr_slabs; 595 596 s->cpu_partial = nr_objects; 597 598 /* 599 * We take the number of objects but actually limit the number of 600 * slabs on the per cpu partial list, in order to limit excessive 601 * growth of the list. For simplicity we assume that the slabs will 602 * be half-full. 603 */ 604 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo)); 605 s->cpu_partial_slabs = nr_slabs; 606 } 607 #else 608 static inline void 609 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) 610 { 611 } 612 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 613 614 /* 615 * Per slab locking using the pagelock 616 */ 617 static __always_inline void slab_lock(struct slab *slab) 618 { 619 struct page *page = slab_page(slab); 620 621 VM_BUG_ON_PAGE(PageTail(page), page); 622 bit_spin_lock(PG_locked, &page->flags); 623 } 624 625 static __always_inline void slab_unlock(struct slab *slab) 626 { 627 struct page *page = slab_page(slab); 628 629 VM_BUG_ON_PAGE(PageTail(page), page); 630 bit_spin_unlock(PG_locked, &page->flags); 631 } 632 633 static inline bool 634 __update_freelist_fast(struct slab *slab, 635 void *freelist_old, unsigned long counters_old, 636 void *freelist_new, unsigned long counters_new) 637 { 638 #ifdef system_has_freelist_aba 639 freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old }; 640 freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new }; 641 642 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); 643 #else 644 return false; 645 #endif 646 } 647 648 static inline bool 649 __update_freelist_slow(struct slab *slab, 650 void *freelist_old, unsigned long counters_old, 651 void *freelist_new, unsigned long counters_new) 652 { 653 bool ret = false; 654 655 slab_lock(slab); 656 if (slab->freelist == freelist_old && 657 slab->counters == counters_old) { 658 slab->freelist = freelist_new; 659 slab->counters = counters_new; 660 ret = true; 661 } 662 slab_unlock(slab); 663 664 return ret; 665 } 666 667 /* 668 * Interrupts must be disabled (for the fallback code to work right), typically 669 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is 670 * part of bit_spin_lock(), is sufficient because the policy is not to allow any 671 * allocation/ free operation in hardirq context. Therefore nothing can 672 * interrupt the operation. 
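 *
 * A typical caller (sketch only; the real call sites differ in detail)
 * snapshots the freelist and counters, computes replacements and retries
 * until the update succeeds:
 *
 *	do {
 *		old_freelist = slab->freelist;
 *		old_counters = slab->counters;
 *		... compute new_freelist / new_counters ...
 *	} while (!slab_update_freelist(s, slab, old_freelist, old_counters,
 *				       new_freelist, new_counters, "example"));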
673 */ 674 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab, 675 void *freelist_old, unsigned long counters_old, 676 void *freelist_new, unsigned long counters_new, 677 const char *n) 678 { 679 bool ret; 680 681 if (USE_LOCKLESS_FAST_PATH()) 682 lockdep_assert_irqs_disabled(); 683 684 if (s->flags & __CMPXCHG_DOUBLE) { 685 ret = __update_freelist_fast(slab, freelist_old, counters_old, 686 freelist_new, counters_new); 687 } else { 688 ret = __update_freelist_slow(slab, freelist_old, counters_old, 689 freelist_new, counters_new); 690 } 691 if (likely(ret)) 692 return true; 693 694 cpu_relax(); 695 stat(s, CMPXCHG_DOUBLE_FAIL); 696 697 #ifdef SLUB_DEBUG_CMPXCHG 698 pr_info("%s %s: cmpxchg double redo ", n, s->name); 699 #endif 700 701 return false; 702 } 703 704 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, 705 void *freelist_old, unsigned long counters_old, 706 void *freelist_new, unsigned long counters_new, 707 const char *n) 708 { 709 bool ret; 710 711 if (s->flags & __CMPXCHG_DOUBLE) { 712 ret = __update_freelist_fast(slab, freelist_old, counters_old, 713 freelist_new, counters_new); 714 } else { 715 unsigned long flags; 716 717 local_irq_save(flags); 718 ret = __update_freelist_slow(slab, freelist_old, counters_old, 719 freelist_new, counters_new); 720 local_irq_restore(flags); 721 } 722 if (likely(ret)) 723 return true; 724 725 cpu_relax(); 726 stat(s, CMPXCHG_DOUBLE_FAIL); 727 728 #ifdef SLUB_DEBUG_CMPXCHG 729 pr_info("%s %s: cmpxchg double redo ", n, s->name); 730 #endif 731 732 return false; 733 } 734 735 #ifdef CONFIG_SLUB_DEBUG 736 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; 737 static DEFINE_SPINLOCK(object_map_lock); 738 739 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s, 740 struct slab *slab) 741 { 742 void *addr = slab_address(slab); 743 void *p; 744 745 bitmap_zero(obj_map, slab->objects); 746 747 for (p = slab->freelist; p; p = get_freepointer(s, p)) 748 set_bit(__obj_to_index(s, addr, p), obj_map); 749 } 750 751 #if IS_ENABLED(CONFIG_KUNIT) 752 static bool slab_add_kunit_errors(void) 753 { 754 struct kunit_resource *resource; 755 756 if (!kunit_get_current_test()) 757 return false; 758 759 resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); 760 if (!resource) 761 return false; 762 763 (*(int *)resource->data)++; 764 kunit_put_resource(resource); 765 return true; 766 } 767 #else 768 static inline bool slab_add_kunit_errors(void) { return false; } 769 #endif 770 771 static inline unsigned int size_from_object(struct kmem_cache *s) 772 { 773 if (s->flags & SLAB_RED_ZONE) 774 return s->size - s->red_left_pad; 775 776 return s->size; 777 } 778 779 static inline void *restore_red_left(struct kmem_cache *s, void *p) 780 { 781 if (s->flags & SLAB_RED_ZONE) 782 p -= s->red_left_pad; 783 784 return p; 785 } 786 787 /* 788 * Debug settings: 789 */ 790 #if defined(CONFIG_SLUB_DEBUG_ON) 791 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; 792 #else 793 static slab_flags_t slub_debug; 794 #endif 795 796 static char *slub_debug_string; 797 static int disable_higher_order_debug; 798 799 /* 800 * slub is about to manipulate internal object metadata. This memory lies 801 * outside the range of the allocated object, so accessing it would normally 802 * be reported by kasan as a bounds error. metadata_access_enable() is used 803 * to tell kasan that these accesses are OK. 
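 *
 * The typical pattern, as used by e.g. check_bytes_and_report() and
 * print_section() later in this file, is:
 *
 *	metadata_access_enable();
 *	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
 *	metadata_access_disable();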
804 */ 805 static inline void metadata_access_enable(void) 806 { 807 kasan_disable_current(); 808 } 809 810 static inline void metadata_access_disable(void) 811 { 812 kasan_enable_current(); 813 } 814 815 /* 816 * Object debugging 817 */ 818 819 /* Verify that a pointer has an address that is valid within a slab page */ 820 static inline int check_valid_pointer(struct kmem_cache *s, 821 struct slab *slab, void *object) 822 { 823 void *base; 824 825 if (!object) 826 return 1; 827 828 base = slab_address(slab); 829 object = kasan_reset_tag(object); 830 object = restore_red_left(s, object); 831 if (object < base || object >= base + slab->objects * s->size || 832 (object - base) % s->size) { 833 return 0; 834 } 835 836 return 1; 837 } 838 839 static void print_section(char *level, char *text, u8 *addr, 840 unsigned int length) 841 { 842 metadata_access_enable(); 843 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 844 16, 1, kasan_reset_tag((void *)addr), length, 1); 845 metadata_access_disable(); 846 } 847 848 /* 849 * See comment in calculate_sizes(). 850 */ 851 static inline bool freeptr_outside_object(struct kmem_cache *s) 852 { 853 return s->offset >= s->inuse; 854 } 855 856 /* 857 * Return offset of the end of info block which is inuse + free pointer if 858 * not overlapping with object. 859 */ 860 static inline unsigned int get_info_end(struct kmem_cache *s) 861 { 862 if (freeptr_outside_object(s)) 863 return s->inuse + sizeof(void *); 864 else 865 return s->inuse; 866 } 867 868 static struct track *get_track(struct kmem_cache *s, void *object, 869 enum track_item alloc) 870 { 871 struct track *p; 872 873 p = object + get_info_end(s); 874 875 return kasan_reset_tag(p + alloc); 876 } 877 878 #ifdef CONFIG_STACKDEPOT 879 static noinline depot_stack_handle_t set_track_prepare(void) 880 { 881 depot_stack_handle_t handle; 882 unsigned long entries[TRACK_ADDRS_COUNT]; 883 unsigned int nr_entries; 884 885 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3); 886 handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); 887 888 return handle; 889 } 890 #else 891 static inline depot_stack_handle_t set_track_prepare(void) 892 { 893 return 0; 894 } 895 #endif 896 897 static void set_track_update(struct kmem_cache *s, void *object, 898 enum track_item alloc, unsigned long addr, 899 depot_stack_handle_t handle) 900 { 901 struct track *p = get_track(s, object, alloc); 902 903 #ifdef CONFIG_STACKDEPOT 904 p->handle = handle; 905 #endif 906 p->addr = addr; 907 p->cpu = smp_processor_id(); 908 p->pid = current->pid; 909 p->when = jiffies; 910 } 911 912 static __always_inline void set_track(struct kmem_cache *s, void *object, 913 enum track_item alloc, unsigned long addr) 914 { 915 depot_stack_handle_t handle = set_track_prepare(); 916 917 set_track_update(s, object, alloc, addr, handle); 918 } 919 920 static void init_tracking(struct kmem_cache *s, void *object) 921 { 922 struct track *p; 923 924 if (!(s->flags & SLAB_STORE_USER)) 925 return; 926 927 p = get_track(s, object, TRACK_ALLOC); 928 memset(p, 0, 2*sizeof(struct track)); 929 } 930 931 static void print_track(const char *s, struct track *t, unsigned long pr_time) 932 { 933 depot_stack_handle_t handle __maybe_unused; 934 935 if (!t->addr) 936 return; 937 938 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n", 939 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); 940 #ifdef CONFIG_STACKDEPOT 941 handle = READ_ONCE(t->handle); 942 if (handle) 943 stack_depot_print(handle); 944 else 945 pr_err("object allocation/free stack trace 
missing\n");
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_slab_info(const struct slab *slab)
{
	struct folio *folio = (struct folio *)slab_folio(slab);

	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
	       slab, slab->objects, slab->inuse, slab->freelist,
	       folio_flags(folio, 0));
}

/*
 * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API
 * family rounds the real request size up to one of them, so the allocated
 * area can be larger than what was requested. Save the original request size
 * in the metadata area, for better debugging and sanity checks.
 */
static inline void set_orig_size(struct kmem_cache *s,
				void *object, unsigned int orig_size)
{
	void *p = kasan_reset_tag(object);
	unsigned int kasan_meta_size;

	if (!slub_debug_orig_size(s))
		return;

	/*
	 * KASAN can save its free meta data inside of the object at offset 0.
	 * If this meta data size is larger than 'orig_size', it will overlap
	 * the data redzone in [orig_size+1, object_size]. Thus, we adjust
	 * 'orig_size' to be at least as big as KASAN's meta data.
	 */
	kasan_meta_size = kasan_metadata_size(s, true);
	if (kasan_meta_size > orig_size)
		orig_size = kasan_meta_size;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	*(unsigned int *)p = orig_size;
}

static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
	void *p = kasan_reset_tag(object);

	if (!slub_debug_orig_size(s))
		return s->object_size;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	return *(unsigned int *)p;
}

void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
	set_orig_size(s, (void *)object, s->object_size);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");
	va_end(args);
}

__printf(2, 3)
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
1033 { 1034 struct va_format vaf; 1035 va_list args; 1036 1037 if (slab_add_kunit_errors()) 1038 return; 1039 1040 va_start(args, fmt); 1041 vaf.fmt = fmt; 1042 vaf.va = &args; 1043 pr_err("FIX %s: %pV\n", s->name, &vaf); 1044 va_end(args); 1045 } 1046 1047 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) 1048 { 1049 unsigned int off; /* Offset of last byte */ 1050 u8 *addr = slab_address(slab); 1051 1052 print_tracking(s, p); 1053 1054 print_slab_info(slab); 1055 1056 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n", 1057 p, p - addr, get_freepointer(s, p)); 1058 1059 if (s->flags & SLAB_RED_ZONE) 1060 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, 1061 s->red_left_pad); 1062 else if (p > addr + 16) 1063 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); 1064 1065 print_section(KERN_ERR, "Object ", p, 1066 min_t(unsigned int, s->object_size, PAGE_SIZE)); 1067 if (s->flags & SLAB_RED_ZONE) 1068 print_section(KERN_ERR, "Redzone ", p + s->object_size, 1069 s->inuse - s->object_size); 1070 1071 off = get_info_end(s); 1072 1073 if (s->flags & SLAB_STORE_USER) 1074 off += 2 * sizeof(struct track); 1075 1076 if (slub_debug_orig_size(s)) 1077 off += sizeof(unsigned int); 1078 1079 off += kasan_metadata_size(s, false); 1080 1081 if (off != size_from_object(s)) 1082 /* Beginning of the filler is the free pointer */ 1083 print_section(KERN_ERR, "Padding ", p + off, 1084 size_from_object(s) - off); 1085 1086 dump_stack(); 1087 } 1088 1089 static void object_err(struct kmem_cache *s, struct slab *slab, 1090 u8 *object, char *reason) 1091 { 1092 if (slab_add_kunit_errors()) 1093 return; 1094 1095 slab_bug(s, "%s", reason); 1096 print_trailer(s, slab, object); 1097 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1098 } 1099 1100 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 1101 void **freelist, void *nextfree) 1102 { 1103 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && 1104 !check_valid_pointer(s, slab, nextfree) && freelist) { 1105 object_err(s, slab, *freelist, "Freechain corrupt"); 1106 *freelist = NULL; 1107 slab_fix(s, "Isolate corrupted freechain"); 1108 return true; 1109 } 1110 1111 return false; 1112 } 1113 1114 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab, 1115 const char *fmt, ...) 1116 { 1117 va_list args; 1118 char buf[100]; 1119 1120 if (slab_add_kunit_errors()) 1121 return; 1122 1123 va_start(args, fmt); 1124 vsnprintf(buf, sizeof(buf), fmt, args); 1125 va_end(args); 1126 slab_bug(s, "%s", buf); 1127 print_slab_info(slab); 1128 dump_stack(); 1129 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1130 } 1131 1132 static void init_object(struct kmem_cache *s, void *object, u8 val) 1133 { 1134 u8 *p = kasan_reset_tag(object); 1135 unsigned int poison_size = s->object_size; 1136 1137 if (s->flags & SLAB_RED_ZONE) { 1138 memset(p - s->red_left_pad, val, s->red_left_pad); 1139 1140 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { 1141 /* 1142 * Redzone the extra allocated space by kmalloc than 1143 * requested, and the poison size will be limited to 1144 * the original request size accordingly. 
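 *
 * For example (sizes invented for illustration): a kmalloc(12)
 * served from a 16-byte kmalloc cache records orig_size = 12, so
 * only bytes [0, 12) are treated as object data while bytes
 * [12, 16) are filled with the redzone pattern and checked on
 * free like any other redzone.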
1145 */ 1146 poison_size = get_orig_size(s, object); 1147 } 1148 } 1149 1150 if (s->flags & __OBJECT_POISON) { 1151 memset(p, POISON_FREE, poison_size - 1); 1152 p[poison_size - 1] = POISON_END; 1153 } 1154 1155 if (s->flags & SLAB_RED_ZONE) 1156 memset(p + poison_size, val, s->inuse - poison_size); 1157 } 1158 1159 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 1160 void *from, void *to) 1161 { 1162 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); 1163 memset(from, data, to - from); 1164 } 1165 1166 static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab, 1167 u8 *object, char *what, 1168 u8 *start, unsigned int value, unsigned int bytes) 1169 { 1170 u8 *fault; 1171 u8 *end; 1172 u8 *addr = slab_address(slab); 1173 1174 metadata_access_enable(); 1175 fault = memchr_inv(kasan_reset_tag(start), value, bytes); 1176 metadata_access_disable(); 1177 if (!fault) 1178 return 1; 1179 1180 end = start + bytes; 1181 while (end > fault && end[-1] == value) 1182 end--; 1183 1184 if (slab_add_kunit_errors()) 1185 goto skip_bug_print; 1186 1187 slab_bug(s, "%s overwritten", what); 1188 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", 1189 fault, end - 1, fault - addr, 1190 fault[0], value); 1191 print_trailer(s, slab, object); 1192 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1193 1194 skip_bug_print: 1195 restore_bytes(s, what, value, fault, end); 1196 return 0; 1197 } 1198 1199 /* 1200 * Object layout: 1201 * 1202 * object address 1203 * Bytes of the object to be managed. 1204 * If the freepointer may overlay the object then the free 1205 * pointer is at the middle of the object. 1206 * 1207 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 1208 * 0xa5 (POISON_END) 1209 * 1210 * object + s->object_size 1211 * Padding to reach word boundary. This is also used for Redzoning. 1212 * Padding is extended by another word if Redzoning is enabled and 1213 * object_size == inuse. 1214 * 1215 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 1216 * 0xcc (RED_ACTIVE) for objects in use. 1217 * 1218 * object + s->inuse 1219 * Meta data starts here. 1220 * 1221 * A. Free pointer (if we cannot overwrite object on free) 1222 * B. Tracking data for SLAB_STORE_USER 1223 * C. Original request size for kmalloc object (SLAB_STORE_USER enabled) 1224 * D. Padding to reach required alignment boundary or at minimum 1225 * one word if debugging is on to be able to detect writes 1226 * before the word boundary. 1227 * 1228 * Padding is done using 0x5a (POISON_INUSE) 1229 * 1230 * object + s->size 1231 * Nothing is used beyond s->size. 1232 * 1233 * If slabcaches are merged then the object_size and inuse boundaries are mostly 1234 * ignored. And therefore no slab options that rely on these boundaries 1235 * may be used with merged slabcaches. 
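 *
 * Rough sketch of one object with SLAB_RED_ZONE and SLAB_STORE_USER enabled
 * (editor's illustration; widths not to scale, some parts are conditional):
 *
 *	| left redzone | object data (object_size) | right redzone/padding |
 *	| free pointer (if outside object) | 2 x struct track | orig_size  |
 *	| padding up to the next object, s->size bytes from this one       |
 *
 * The metadata block in the middle row starts at object + s->inuse.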
1236 */ 1237 1238 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) 1239 { 1240 unsigned long off = get_info_end(s); /* The end of info */ 1241 1242 if (s->flags & SLAB_STORE_USER) { 1243 /* We also have user information there */ 1244 off += 2 * sizeof(struct track); 1245 1246 if (s->flags & SLAB_KMALLOC) 1247 off += sizeof(unsigned int); 1248 } 1249 1250 off += kasan_metadata_size(s, false); 1251 1252 if (size_from_object(s) == off) 1253 return 1; 1254 1255 return check_bytes_and_report(s, slab, p, "Object padding", 1256 p + off, POISON_INUSE, size_from_object(s) - off); 1257 } 1258 1259 /* Check the pad bytes at the end of a slab page */ 1260 static void slab_pad_check(struct kmem_cache *s, struct slab *slab) 1261 { 1262 u8 *start; 1263 u8 *fault; 1264 u8 *end; 1265 u8 *pad; 1266 int length; 1267 int remainder; 1268 1269 if (!(s->flags & SLAB_POISON)) 1270 return; 1271 1272 start = slab_address(slab); 1273 length = slab_size(slab); 1274 end = start + length; 1275 remainder = length % s->size; 1276 if (!remainder) 1277 return; 1278 1279 pad = end - remainder; 1280 metadata_access_enable(); 1281 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder); 1282 metadata_access_disable(); 1283 if (!fault) 1284 return; 1285 while (end > fault && end[-1] == POISON_INUSE) 1286 end--; 1287 1288 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu", 1289 fault, end - 1, fault - start); 1290 print_section(KERN_ERR, "Padding ", pad, remainder); 1291 1292 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); 1293 } 1294 1295 static int check_object(struct kmem_cache *s, struct slab *slab, 1296 void *object, u8 val) 1297 { 1298 u8 *p = object; 1299 u8 *endobject = object + s->object_size; 1300 unsigned int orig_size, kasan_meta_size; 1301 1302 if (s->flags & SLAB_RED_ZONE) { 1303 if (!check_bytes_and_report(s, slab, object, "Left Redzone", 1304 object - s->red_left_pad, val, s->red_left_pad)) 1305 return 0; 1306 1307 if (!check_bytes_and_report(s, slab, object, "Right Redzone", 1308 endobject, val, s->inuse - s->object_size)) 1309 return 0; 1310 1311 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { 1312 orig_size = get_orig_size(s, object); 1313 1314 if (s->object_size > orig_size && 1315 !check_bytes_and_report(s, slab, object, 1316 "kmalloc Redzone", p + orig_size, 1317 val, s->object_size - orig_size)) { 1318 return 0; 1319 } 1320 } 1321 } else { 1322 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 1323 check_bytes_and_report(s, slab, p, "Alignment padding", 1324 endobject, POISON_INUSE, 1325 s->inuse - s->object_size); 1326 } 1327 } 1328 1329 if (s->flags & SLAB_POISON) { 1330 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) { 1331 /* 1332 * KASAN can save its free meta data inside of the 1333 * object at offset 0. Thus, skip checking the part of 1334 * the redzone that overlaps with the meta data. 1335 */ 1336 kasan_meta_size = kasan_metadata_size(s, true); 1337 if (kasan_meta_size < s->object_size - 1 && 1338 !check_bytes_and_report(s, slab, p, "Poison", 1339 p + kasan_meta_size, POISON_FREE, 1340 s->object_size - kasan_meta_size - 1)) 1341 return 0; 1342 if (kasan_meta_size < s->object_size && 1343 !check_bytes_and_report(s, slab, p, "End Poison", 1344 p + s->object_size - 1, POISON_END, 1)) 1345 return 0; 1346 } 1347 /* 1348 * check_pad_bytes cleans up on its own. 
1349 */ 1350 check_pad_bytes(s, slab, p); 1351 } 1352 1353 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) 1354 /* 1355 * Object and freepointer overlap. Cannot check 1356 * freepointer while object is allocated. 1357 */ 1358 return 1; 1359 1360 /* Check free pointer validity */ 1361 if (!check_valid_pointer(s, slab, get_freepointer(s, p))) { 1362 object_err(s, slab, p, "Freepointer corrupt"); 1363 /* 1364 * No choice but to zap it and thus lose the remainder 1365 * of the free objects in this slab. May cause 1366 * another error because the object count is now wrong. 1367 */ 1368 set_freepointer(s, p, NULL); 1369 return 0; 1370 } 1371 return 1; 1372 } 1373 1374 static int check_slab(struct kmem_cache *s, struct slab *slab) 1375 { 1376 int maxobj; 1377 1378 if (!folio_test_slab(slab_folio(slab))) { 1379 slab_err(s, slab, "Not a valid slab page"); 1380 return 0; 1381 } 1382 1383 maxobj = order_objects(slab_order(slab), s->size); 1384 if (slab->objects > maxobj) { 1385 slab_err(s, slab, "objects %u > max %u", 1386 slab->objects, maxobj); 1387 return 0; 1388 } 1389 if (slab->inuse > slab->objects) { 1390 slab_err(s, slab, "inuse %u > max %u", 1391 slab->inuse, slab->objects); 1392 return 0; 1393 } 1394 /* Slab_pad_check fixes things up after itself */ 1395 slab_pad_check(s, slab); 1396 return 1; 1397 } 1398 1399 /* 1400 * Determine if a certain object in a slab is on the freelist. Must hold the 1401 * slab lock to guarantee that the chains are in a consistent state. 1402 */ 1403 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search) 1404 { 1405 int nr = 0; 1406 void *fp; 1407 void *object = NULL; 1408 int max_objects; 1409 1410 fp = slab->freelist; 1411 while (fp && nr <= slab->objects) { 1412 if (fp == search) 1413 return 1; 1414 if (!check_valid_pointer(s, slab, fp)) { 1415 if (object) { 1416 object_err(s, slab, object, 1417 "Freechain corrupt"); 1418 set_freepointer(s, object, NULL); 1419 } else { 1420 slab_err(s, slab, "Freepointer corrupt"); 1421 slab->freelist = NULL; 1422 slab->inuse = slab->objects; 1423 slab_fix(s, "Freelist cleared"); 1424 return 0; 1425 } 1426 break; 1427 } 1428 object = fp; 1429 fp = get_freepointer(s, object); 1430 nr++; 1431 } 1432 1433 max_objects = order_objects(slab_order(slab), s->size); 1434 if (max_objects > MAX_OBJS_PER_PAGE) 1435 max_objects = MAX_OBJS_PER_PAGE; 1436 1437 if (slab->objects != max_objects) { 1438 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", 1439 slab->objects, max_objects); 1440 slab->objects = max_objects; 1441 slab_fix(s, "Number of objects adjusted"); 1442 } 1443 if (slab->inuse != slab->objects - nr) { 1444 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", 1445 slab->inuse, slab->objects - nr); 1446 slab->inuse = slab->objects - nr; 1447 slab_fix(s, "Object count adjusted"); 1448 } 1449 return search == NULL; 1450 } 1451 1452 static void trace(struct kmem_cache *s, struct slab *slab, void *object, 1453 int alloc) 1454 { 1455 if (s->flags & SLAB_TRACE) { 1456 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 1457 s->name, 1458 alloc ? "alloc" : "free", 1459 object, slab->inuse, 1460 slab->freelist); 1461 1462 if (!alloc) 1463 print_section(KERN_INFO, "Object ", (void *)object, 1464 s->object_size); 1465 1466 dump_stack(); 1467 } 1468 } 1469 1470 /* 1471 * Tracking of fully allocated slabs for debugging purposes. 
1472 */ 1473 static void add_full(struct kmem_cache *s, 1474 struct kmem_cache_node *n, struct slab *slab) 1475 { 1476 if (!(s->flags & SLAB_STORE_USER)) 1477 return; 1478 1479 lockdep_assert_held(&n->list_lock); 1480 list_add(&slab->slab_list, &n->full); 1481 } 1482 1483 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab) 1484 { 1485 if (!(s->flags & SLAB_STORE_USER)) 1486 return; 1487 1488 lockdep_assert_held(&n->list_lock); 1489 list_del(&slab->slab_list); 1490 } 1491 1492 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1493 { 1494 return atomic_long_read(&n->nr_slabs); 1495 } 1496 1497 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 1498 { 1499 struct kmem_cache_node *n = get_node(s, node); 1500 1501 atomic_long_inc(&n->nr_slabs); 1502 atomic_long_add(objects, &n->total_objects); 1503 } 1504 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 1505 { 1506 struct kmem_cache_node *n = get_node(s, node); 1507 1508 atomic_long_dec(&n->nr_slabs); 1509 atomic_long_sub(objects, &n->total_objects); 1510 } 1511 1512 /* Object debug checks for alloc/free paths */ 1513 static void setup_object_debug(struct kmem_cache *s, void *object) 1514 { 1515 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) 1516 return; 1517 1518 init_object(s, object, SLUB_RED_INACTIVE); 1519 init_tracking(s, object); 1520 } 1521 1522 static 1523 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) 1524 { 1525 if (!kmem_cache_debug_flags(s, SLAB_POISON)) 1526 return; 1527 1528 metadata_access_enable(); 1529 memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab)); 1530 metadata_access_disable(); 1531 } 1532 1533 static inline int alloc_consistency_checks(struct kmem_cache *s, 1534 struct slab *slab, void *object) 1535 { 1536 if (!check_slab(s, slab)) 1537 return 0; 1538 1539 if (!check_valid_pointer(s, slab, object)) { 1540 object_err(s, slab, object, "Freelist Pointer check fails"); 1541 return 0; 1542 } 1543 1544 if (!check_object(s, slab, object, SLUB_RED_INACTIVE)) 1545 return 0; 1546 1547 return 1; 1548 } 1549 1550 static noinline bool alloc_debug_processing(struct kmem_cache *s, 1551 struct slab *slab, void *object, int orig_size) 1552 { 1553 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1554 if (!alloc_consistency_checks(s, slab, object)) 1555 goto bad; 1556 } 1557 1558 /* Success. Perform special debug activities for allocs */ 1559 trace(s, slab, object, 1); 1560 set_orig_size(s, object, orig_size); 1561 init_object(s, object, SLUB_RED_ACTIVE); 1562 return true; 1563 1564 bad: 1565 if (folio_test_slab(slab_folio(slab))) { 1566 /* 1567 * If this is a slab page then lets do the best we can 1568 * to avoid issues in the future. Marking all objects 1569 * as used avoids touching the remaining objects. 
1570 */ 1571 slab_fix(s, "Marking all objects used"); 1572 slab->inuse = slab->objects; 1573 slab->freelist = NULL; 1574 } 1575 return false; 1576 } 1577 1578 static inline int free_consistency_checks(struct kmem_cache *s, 1579 struct slab *slab, void *object, unsigned long addr) 1580 { 1581 if (!check_valid_pointer(s, slab, object)) { 1582 slab_err(s, slab, "Invalid object pointer 0x%p", object); 1583 return 0; 1584 } 1585 1586 if (on_freelist(s, slab, object)) { 1587 object_err(s, slab, object, "Object already free"); 1588 return 0; 1589 } 1590 1591 if (!check_object(s, slab, object, SLUB_RED_ACTIVE)) 1592 return 0; 1593 1594 if (unlikely(s != slab->slab_cache)) { 1595 if (!folio_test_slab(slab_folio(slab))) { 1596 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", 1597 object); 1598 } else if (!slab->slab_cache) { 1599 pr_err("SLUB <none>: no slab for object 0x%p.\n", 1600 object); 1601 dump_stack(); 1602 } else 1603 object_err(s, slab, object, 1604 "page slab pointer corrupt."); 1605 return 0; 1606 } 1607 return 1; 1608 } 1609 1610 /* 1611 * Parse a block of slab_debug options. Blocks are delimited by ';' 1612 * 1613 * @str: start of block 1614 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified 1615 * @slabs: return start of list of slabs, or NULL when there's no list 1616 * @init: assume this is initial parsing and not per-kmem-create parsing 1617 * 1618 * returns the start of next block if there's any, or NULL 1619 */ 1620 static char * 1621 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init) 1622 { 1623 bool higher_order_disable = false; 1624 1625 /* Skip any completely empty blocks */ 1626 while (*str && *str == ';') 1627 str++; 1628 1629 if (*str == ',') { 1630 /* 1631 * No options but restriction on slabs. This means full 1632 * debugging for slabs matching a pattern. 1633 */ 1634 *flags = DEBUG_DEFAULT_FLAGS; 1635 goto check_slabs; 1636 } 1637 *flags = 0; 1638 1639 /* Determine which debug features should be switched on */ 1640 for (; *str && *str != ',' && *str != ';'; str++) { 1641 switch (tolower(*str)) { 1642 case '-': 1643 *flags = 0; 1644 break; 1645 case 'f': 1646 *flags |= SLAB_CONSISTENCY_CHECKS; 1647 break; 1648 case 'z': 1649 *flags |= SLAB_RED_ZONE; 1650 break; 1651 case 'p': 1652 *flags |= SLAB_POISON; 1653 break; 1654 case 'u': 1655 *flags |= SLAB_STORE_USER; 1656 break; 1657 case 't': 1658 *flags |= SLAB_TRACE; 1659 break; 1660 case 'a': 1661 *flags |= SLAB_FAILSLAB; 1662 break; 1663 case 'o': 1664 /* 1665 * Avoid enabling debugging on caches if its minimum 1666 * order would increase as a result. 1667 */ 1668 higher_order_disable = true; 1669 break; 1670 default: 1671 if (init) 1672 pr_err("slab_debug option '%c' unknown. 
skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}

static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	slab_flags_t global_flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	global_flags = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			global_flags = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
			if (flags & SLAB_STORE_USER)
				stack_depot_request_early_init();
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with a list of
	 * slabs means debugging is only changed for those slabs, so the global
	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			global_flags = slub_debug;
		slub_debug_string = saved_str;
	}
out:
	slub_debug = global_flags;
	if (slub_debug & SLAB_STORE_USER)
		stack_depot_request_early_init();
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	else
		static_branch_disable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

__setup("slab_debug", setup_slub_debug);
__setup_param("slub_debug", slub_debug, setup_slub_debug, 0);

/*
 * kmem_cache_flags - apply debugging options to the cache
 * @flags: flags to set
 * @name: name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the selected slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	if (flags & SLAB_NO_USER_FLAGS)
		return flags;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
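	 *
	 * For example (illustrative), booting with
	 *
	 *	slab_debug=U,kmemleak_object
	 *
	 * stores user tracking information for just that cache, even though
	 * it is created with SLAB_NOLEAKTRACE.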
1783 */ 1784 if (flags & SLAB_NOLEAKTRACE) 1785 slub_debug_local &= ~SLAB_STORE_USER; 1786 1787 len = strlen(name); 1788 next_block = slub_debug_string; 1789 /* Go through all blocks of debug options, see if any matches our slab's name */ 1790 while (next_block) { 1791 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); 1792 if (!iter) 1793 continue; 1794 /* Found a block that has a slab list, search it */ 1795 while (*iter) { 1796 char *end, *glob; 1797 size_t cmplen; 1798 1799 end = strchrnul(iter, ','); 1800 if (next_block && next_block < end) 1801 end = next_block - 1; 1802 1803 glob = strnchr(iter, end - iter, '*'); 1804 if (glob) 1805 cmplen = glob - iter; 1806 else 1807 cmplen = max_t(size_t, len, (end - iter)); 1808 1809 if (!strncmp(name, iter, cmplen)) { 1810 flags |= block_flags; 1811 return flags; 1812 } 1813 1814 if (!*end || *end == ';') 1815 break; 1816 iter = end + 1; 1817 } 1818 } 1819 1820 return flags | slub_debug_local; 1821 } 1822 #else /* !CONFIG_SLUB_DEBUG */ 1823 static inline void setup_object_debug(struct kmem_cache *s, void *object) {} 1824 static inline 1825 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} 1826 1827 static inline bool alloc_debug_processing(struct kmem_cache *s, 1828 struct slab *slab, void *object, int orig_size) { return true; } 1829 1830 static inline bool free_debug_processing(struct kmem_cache *s, 1831 struct slab *slab, void *head, void *tail, int *bulk_cnt, 1832 unsigned long addr, depot_stack_handle_t handle) { return true; } 1833 1834 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} 1835 static inline int check_object(struct kmem_cache *s, struct slab *slab, 1836 void *object, u8 val) { return 1; } 1837 static inline depot_stack_handle_t set_track_prepare(void) { return 0; } 1838 static inline void set_track(struct kmem_cache *s, void *object, 1839 enum track_item alloc, unsigned long addr) {} 1840 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1841 struct slab *slab) {} 1842 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 1843 struct slab *slab) {} 1844 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) 1845 { 1846 return flags; 1847 } 1848 #define slub_debug 0 1849 1850 #define disable_higher_order_debug 0 1851 1852 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1853 { return 0; } 1854 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1855 int objects) {} 1856 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1857 int objects) {} 1858 1859 #ifndef CONFIG_SLUB_TINY 1860 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 1861 void **freelist, void *nextfree) 1862 { 1863 return false; 1864 } 1865 #endif 1866 #endif /* CONFIG_SLUB_DEBUG */ 1867 1868 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s) 1869 { 1870 return (s->flags & SLAB_RECLAIM_ACCOUNT) ? 1871 NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B; 1872 } 1873 1874 #ifdef CONFIG_SLAB_OBJ_EXT 1875 1876 /* 1877 * The allocated objcg pointers array is not accounted directly. 1878 * Moreover, it should not come from DMA buffer and is not readily 1879 * reclaimable. So those GFP bits should be masked off. 
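 *
 * alloc_slab_obj_exts() below therefore allocates the vector with
 *
 *	gfp &= ~OBJCGS_CLEAR_MASK;
 *	gfp |= __GFP_NO_OBJ_EXT;
 *
 * so allocating the extension vector itself never recurses into another
 * extension allocation.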
1880 */ 1881 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ 1882 __GFP_ACCOUNT | __GFP_NOFAIL) 1883 1884 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 1885 gfp_t gfp, bool new_slab) 1886 { 1887 unsigned int objects = objs_per_slab(s, slab); 1888 unsigned long obj_exts; 1889 void *vec; 1890 1891 gfp &= ~OBJCGS_CLEAR_MASK; 1892 /* Prevent recursive extension vector allocation */ 1893 gfp |= __GFP_NO_OBJ_EXT; 1894 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, 1895 slab_nid(slab)); 1896 if (!vec) 1897 return -ENOMEM; 1898 1899 obj_exts = (unsigned long)vec; 1900 #ifdef CONFIG_MEMCG 1901 obj_exts |= MEMCG_DATA_OBJEXTS; 1902 #endif 1903 if (new_slab) { 1904 /* 1905 * If the slab is brand new and nobody can yet access its 1906 * obj_exts, no synchronization is required and obj_exts can 1907 * be simply assigned. 1908 */ 1909 slab->obj_exts = obj_exts; 1910 } else if (cmpxchg(&slab->obj_exts, 0, obj_exts)) { 1911 /* 1912 * If the slab is already in use, somebody can allocate and 1913 * assign slabobj_exts in parallel. In this case the existing 1914 * objcg vector should be reused. 1915 */ 1916 kfree(vec); 1917 return 0; 1918 } 1919 1920 kmemleak_not_leak(vec); 1921 return 0; 1922 } 1923 1924 static inline void free_slab_obj_exts(struct slab *slab) 1925 { 1926 struct slabobj_ext *obj_exts; 1927 1928 obj_exts = slab_obj_exts(slab); 1929 if (!obj_exts) 1930 return; 1931 1932 kfree(obj_exts); 1933 slab->obj_exts = 0; 1934 } 1935 1936 static inline bool need_slab_obj_ext(void) 1937 { 1938 if (mem_alloc_profiling_enabled()) 1939 return true; 1940 1941 /* 1942 * CONFIG_MEMCG_KMEM creates vector of obj_cgroup objects conditionally 1943 * inside memcg_slab_post_alloc_hook. No other users for now. 1944 */ 1945 return false; 1946 } 1947 1948 static inline struct slabobj_ext * 1949 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 1950 { 1951 struct slab *slab; 1952 1953 if (!p) 1954 return NULL; 1955 1956 if (s->flags & SLAB_NO_OBJ_EXT) 1957 return NULL; 1958 1959 if (flags & __GFP_NO_OBJ_EXT) 1960 return NULL; 1961 1962 slab = virt_to_slab(p); 1963 if (!slab_obj_exts(slab) && 1964 WARN(alloc_slab_obj_exts(slab, s, flags, false), 1965 "%s, %s: Failed to create slab extension vector!\n", 1966 __func__, s->name)) 1967 return NULL; 1968 1969 return slab_obj_exts(slab) + obj_to_index(s, slab, p); 1970 } 1971 1972 static inline void 1973 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 1974 int objects) 1975 { 1976 #ifdef CONFIG_MEM_ALLOC_PROFILING 1977 struct slabobj_ext *obj_exts; 1978 int i; 1979 1980 if (!mem_alloc_profiling_enabled()) 1981 return; 1982 1983 obj_exts = slab_obj_exts(slab); 1984 if (!obj_exts) 1985 return; 1986 1987 for (i = 0; i < objects; i++) { 1988 unsigned int off = obj_to_index(s, slab, p[i]); 1989 1990 alloc_tag_sub(&obj_exts[off].ref, s->size); 1991 } 1992 #endif 1993 } 1994 1995 #else /* CONFIG_SLAB_OBJ_EXT */ 1996 1997 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 1998 gfp_t gfp, bool new_slab) 1999 { 2000 return 0; 2001 } 2002 2003 static inline void free_slab_obj_exts(struct slab *slab) 2004 { 2005 } 2006 2007 static inline bool need_slab_obj_ext(void) 2008 { 2009 return false; 2010 } 2011 2012 static inline struct slabobj_ext * 2013 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 2014 { 2015 return NULL; 2016 } 2017 2018 static inline void 2019 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2020 
int objects) 2021 { 2022 } 2023 2024 #endif /* CONFIG_SLAB_OBJ_EXT */ 2025 2026 #ifdef CONFIG_MEMCG_KMEM 2027 static inline size_t obj_full_size(struct kmem_cache *s) 2028 { 2029 /* 2030 * For each accounted object there is an extra space which is used 2031 * to store obj_cgroup membership. Charge it too. 2032 */ 2033 return s->size + sizeof(struct obj_cgroup *); 2034 } 2035 2036 /* 2037 * Returns false if the allocation should fail. 2038 */ 2039 static bool __memcg_slab_pre_alloc_hook(struct kmem_cache *s, 2040 struct list_lru *lru, 2041 struct obj_cgroup **objcgp, 2042 size_t objects, gfp_t flags) 2043 { 2044 /* 2045 * The obtained objcg pointer is safe to use within the current scope, 2046 * defined by current task or set_active_memcg() pair. 2047 * obj_cgroup_get() is used to get a permanent reference. 2048 */ 2049 struct obj_cgroup *objcg = current_obj_cgroup(); 2050 if (!objcg) 2051 return true; 2052 2053 if (lru) { 2054 int ret; 2055 struct mem_cgroup *memcg; 2056 2057 memcg = get_mem_cgroup_from_objcg(objcg); 2058 ret = memcg_list_lru_alloc(memcg, lru, flags); 2059 css_put(&memcg->css); 2060 2061 if (ret) 2062 return false; 2063 } 2064 2065 if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) 2066 return false; 2067 2068 *objcgp = objcg; 2069 return true; 2070 } 2071 2072 /* 2073 * Returns false if the allocation should fail. 2074 */ 2075 static __fastpath_inline 2076 bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 2077 struct obj_cgroup **objcgp, size_t objects, 2078 gfp_t flags) 2079 { 2080 if (!memcg_kmem_online()) 2081 return true; 2082 2083 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) 2084 return true; 2085 2086 return likely(__memcg_slab_pre_alloc_hook(s, lru, objcgp, objects, 2087 flags)); 2088 } 2089 2090 static void __memcg_slab_post_alloc_hook(struct kmem_cache *s, 2091 struct obj_cgroup *objcg, 2092 gfp_t flags, size_t size, 2093 void **p) 2094 { 2095 struct slab *slab; 2096 unsigned long off; 2097 size_t i; 2098 2099 flags &= gfp_allowed_mask; 2100 2101 for (i = 0; i < size; i++) { 2102 if (likely(p[i])) { 2103 slab = virt_to_slab(p[i]); 2104 2105 if (!slab_obj_exts(slab) && 2106 alloc_slab_obj_exts(slab, s, flags, false)) { 2107 obj_cgroup_uncharge(objcg, obj_full_size(s)); 2108 continue; 2109 } 2110 2111 off = obj_to_index(s, slab, p[i]); 2112 obj_cgroup_get(objcg); 2113 slab_obj_exts(slab)[off].objcg = objcg; 2114 mod_objcg_state(objcg, slab_pgdat(slab), 2115 cache_vmstat_idx(s), obj_full_size(s)); 2116 } else { 2117 obj_cgroup_uncharge(objcg, obj_full_size(s)); 2118 } 2119 } 2120 } 2121 2122 static __fastpath_inline 2123 void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, 2124 gfp_t flags, size_t size, void **p) 2125 { 2126 if (likely(!memcg_kmem_online() || !objcg)) 2127 return; 2128 2129 return __memcg_slab_post_alloc_hook(s, objcg, flags, size, p); 2130 } 2131 2132 static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 2133 void **p, int objects, 2134 struct slabobj_ext *obj_exts) 2135 { 2136 for (int i = 0; i < objects; i++) { 2137 struct obj_cgroup *objcg; 2138 unsigned int off; 2139 2140 off = obj_to_index(s, slab, p[i]); 2141 objcg = obj_exts[off].objcg; 2142 if (!objcg) 2143 continue; 2144 2145 obj_exts[off].objcg = NULL; 2146 obj_cgroup_uncharge(objcg, obj_full_size(s)); 2147 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s), 2148 -obj_full_size(s)); 2149 obj_cgroup_put(objcg); 2150 } 2151 } 2152 2153 static __fastpath_inline 2154 void 
memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2155 int objects) 2156 { 2157 struct slabobj_ext *obj_exts; 2158 2159 if (!memcg_kmem_online()) 2160 return; 2161 2162 obj_exts = slab_obj_exts(slab); 2163 if (likely(!obj_exts)) 2164 return; 2165 2166 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); 2167 } 2168 2169 static inline 2170 void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects, 2171 struct obj_cgroup *objcg) 2172 { 2173 if (objcg) 2174 obj_cgroup_uncharge(objcg, objects * obj_full_size(s)); 2175 } 2176 #else /* CONFIG_MEMCG_KMEM */ 2177 static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, 2178 struct list_lru *lru, 2179 struct obj_cgroup **objcgp, 2180 size_t objects, gfp_t flags) 2181 { 2182 return true; 2183 } 2184 2185 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, 2186 struct obj_cgroup *objcg, 2187 gfp_t flags, size_t size, 2188 void **p) 2189 { 2190 } 2191 2192 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 2193 void **p, int objects) 2194 { 2195 } 2196 2197 static inline 2198 void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects, 2199 struct obj_cgroup *objcg) 2200 { 2201 } 2202 #endif /* CONFIG_MEMCG_KMEM */ 2203 2204 /* 2205 * Hooks for other subsystems that check memory allocations. In a typical 2206 * production configuration these hooks all should produce no code at all. 2207 * 2208 * Returns true if freeing of the object can proceed, false if its reuse 2209 * was delayed by KASAN quarantine, or it was returned to KFENCE. 2210 */ 2211 static __always_inline 2212 bool slab_free_hook(struct kmem_cache *s, void *x, bool init) 2213 { 2214 kmemleak_free_recursive(x, s->flags); 2215 kmsan_slab_free(s, x); 2216 2217 debug_check_no_locks_freed(x, s->object_size); 2218 2219 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 2220 debug_check_no_obj_freed(x, s->object_size); 2221 2222 /* Use KCSAN to help debug racy use-after-free. */ 2223 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) 2224 __kcsan_check_access(x, s->object_size, 2225 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 2226 2227 if (kfence_free(x)) 2228 return false; 2229 2230 /* 2231 * As memory initialization might be integrated into KASAN, 2232 * kasan_slab_free and initialization memset's must be 2233 * kept together to avoid discrepancies in behavior. 2234 * 2235 * The initialization memset's clear the object and the metadata, 2236 * but don't touch the SLAB redzone. 2237 */ 2238 if (unlikely(init)) { 2239 int rsize; 2240 2241 if (!kasan_has_integrated_init()) 2242 memset(kasan_reset_tag(x), 0, s->object_size); 2243 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 2244 memset((char *)kasan_reset_tag(x) + s->inuse, 0, 2245 s->size - s->inuse - rsize); 2246 } 2247 /* KASAN might put x into memory quarantine, delaying its reuse. 
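 * If it does, kasan_slab_free() returns true and we report false to the
 * caller, so slab_free_freelist_hook() leaves the object out of the
 * rebuilt freelist and decrements the free count accordingly.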
*/ 2248 return !kasan_slab_free(s, x, init); 2249 } 2250 2251 static __fastpath_inline 2252 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail, 2253 int *cnt) 2254 { 2255 2256 void *object; 2257 void *next = *head; 2258 void *old_tail = *tail; 2259 bool init; 2260 2261 if (is_kfence_address(next)) { 2262 slab_free_hook(s, next, false); 2263 return false; 2264 } 2265 2266 /* Head and tail of the reconstructed freelist */ 2267 *head = NULL; 2268 *tail = NULL; 2269 2270 init = slab_want_init_on_free(s); 2271 2272 do { 2273 object = next; 2274 next = get_freepointer(s, object); 2275 2276 /* If object's reuse doesn't have to be delayed */ 2277 if (likely(slab_free_hook(s, object, init))) { 2278 /* Move object to the new freelist */ 2279 set_freepointer(s, object, *head); 2280 *head = object; 2281 if (!*tail) 2282 *tail = object; 2283 } else { 2284 /* 2285 * Adjust the reconstructed freelist depth 2286 * accordingly if object's reuse is delayed. 2287 */ 2288 --(*cnt); 2289 } 2290 } while (object != old_tail); 2291 2292 return *head != NULL; 2293 } 2294 2295 static void *setup_object(struct kmem_cache *s, void *object) 2296 { 2297 setup_object_debug(s, object); 2298 object = kasan_init_slab_obj(s, object); 2299 if (unlikely(s->ctor)) { 2300 kasan_unpoison_new_object(s, object); 2301 s->ctor(object); 2302 kasan_poison_new_object(s, object); 2303 } 2304 return object; 2305 } 2306 2307 /* 2308 * Slab allocation and freeing 2309 */ 2310 static inline struct slab *alloc_slab_page(gfp_t flags, int node, 2311 struct kmem_cache_order_objects oo) 2312 { 2313 struct folio *folio; 2314 struct slab *slab; 2315 unsigned int order = oo_order(oo); 2316 2317 folio = (struct folio *)alloc_pages_node(node, flags, order); 2318 if (!folio) 2319 return NULL; 2320 2321 slab = folio_slab(folio); 2322 __folio_set_slab(folio); 2323 /* Make the flag visible before any changes to folio->mapping */ 2324 smp_wmb(); 2325 if (folio_is_pfmemalloc(folio)) 2326 slab_set_pfmemalloc(slab); 2327 2328 return slab; 2329 } 2330 2331 #ifdef CONFIG_SLAB_FREELIST_RANDOM 2332 /* Pre-initialize the random sequence cache */ 2333 static int init_cache_random_seq(struct kmem_cache *s) 2334 { 2335 unsigned int count = oo_objects(s->oo); 2336 int err; 2337 2338 /* Bailout if already initialised */ 2339 if (s->random_seq) 2340 return 0; 2341 2342 err = cache_random_seq_create(s, count, GFP_KERNEL); 2343 if (err) { 2344 pr_err("SLUB: Unable to initialize free list for %s\n", 2345 s->name); 2346 return err; 2347 } 2348 2349 /* Transform to an offset on the set of pages */ 2350 if (s->random_seq) { 2351 unsigned int i; 2352 2353 for (i = 0; i < count; i++) 2354 s->random_seq[i] *= s->size; 2355 } 2356 return 0; 2357 } 2358 2359 /* Initialize each random sequence freelist per cache */ 2360 static void __init init_freelist_randomization(void) 2361 { 2362 struct kmem_cache *s; 2363 2364 mutex_lock(&slab_mutex); 2365 2366 list_for_each_entry(s, &slab_caches, list) 2367 init_cache_random_seq(s); 2368 2369 mutex_unlock(&slab_mutex); 2370 } 2371 2372 /* Get the next entry on the pre-computed freelist randomized */ 2373 static void *next_freelist_entry(struct kmem_cache *s, 2374 unsigned long *pos, void *start, 2375 unsigned long page_limit, 2376 unsigned long freelist_count) 2377 { 2378 unsigned int idx; 2379 2380 /* 2381 * If the target page allocation failed, the number of objects on the 2382 * page might be smaller than the usual size defined by the cache. 
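 * Entries of random_seq that point beyond page_limit are therefore
 * skipped by the loop below until an index inside this slab is found.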
2383 */ 2384 do { 2385 idx = s->random_seq[*pos]; 2386 *pos += 1; 2387 if (*pos >= freelist_count) 2388 *pos = 0; 2389 } while (unlikely(idx >= page_limit)); 2390 2391 return (char *)start + idx; 2392 } 2393 2394 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 2395 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2396 { 2397 void *start; 2398 void *cur; 2399 void *next; 2400 unsigned long idx, pos, page_limit, freelist_count; 2401 2402 if (slab->objects < 2 || !s->random_seq) 2403 return false; 2404 2405 freelist_count = oo_objects(s->oo); 2406 pos = get_random_u32_below(freelist_count); 2407 2408 page_limit = slab->objects * s->size; 2409 start = fixup_red_left(s, slab_address(slab)); 2410 2411 /* First entry is used as the base of the freelist */ 2412 cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count); 2413 cur = setup_object(s, cur); 2414 slab->freelist = cur; 2415 2416 for (idx = 1; idx < slab->objects; idx++) { 2417 next = next_freelist_entry(s, &pos, start, page_limit, 2418 freelist_count); 2419 next = setup_object(s, next); 2420 set_freepointer(s, cur, next); 2421 cur = next; 2422 } 2423 set_freepointer(s, cur, NULL); 2424 2425 return true; 2426 } 2427 #else 2428 static inline int init_cache_random_seq(struct kmem_cache *s) 2429 { 2430 return 0; 2431 } 2432 static inline void init_freelist_randomization(void) { } 2433 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) 2434 { 2435 return false; 2436 } 2437 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 2438 2439 static __always_inline void account_slab(struct slab *slab, int order, 2440 struct kmem_cache *s, gfp_t gfp) 2441 { 2442 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) 2443 alloc_slab_obj_exts(slab, s, gfp, true); 2444 2445 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2446 PAGE_SIZE << order); 2447 } 2448 2449 static __always_inline void unaccount_slab(struct slab *slab, int order, 2450 struct kmem_cache *s) 2451 { 2452 if (memcg_kmem_online() || need_slab_obj_ext()) 2453 free_slab_obj_exts(slab); 2454 2455 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2456 -(PAGE_SIZE << order)); 2457 } 2458 2459 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 2460 { 2461 struct slab *slab; 2462 struct kmem_cache_order_objects oo = s->oo; 2463 gfp_t alloc_gfp; 2464 void *start, *p, *next; 2465 int idx; 2466 bool shuffle; 2467 2468 flags &= gfp_allowed_mask; 2469 2470 flags |= s->allocflags; 2471 2472 /* 2473 * Let the initial higher-order allocation fail under memory pressure 2474 * so we fall-back to the minimum order allocation. 2475 */ 2476 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 2477 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 2478 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; 2479 2480 slab = alloc_slab_page(alloc_gfp, node, oo); 2481 if (unlikely(!slab)) { 2482 oo = s->min; 2483 alloc_gfp = flags; 2484 /* 2485 * Allocation may have failed due to fragmentation. 
2486 * Try a lower order alloc if possible 2487 */ 2488 slab = alloc_slab_page(alloc_gfp, node, oo); 2489 if (unlikely(!slab)) 2490 return NULL; 2491 stat(s, ORDER_FALLBACK); 2492 } 2493 2494 slab->objects = oo_objects(oo); 2495 slab->inuse = 0; 2496 slab->frozen = 0; 2497 2498 account_slab(slab, oo_order(oo), s, flags); 2499 2500 slab->slab_cache = s; 2501 2502 kasan_poison_slab(slab); 2503 2504 start = slab_address(slab); 2505 2506 setup_slab_debug(s, slab, start); 2507 2508 shuffle = shuffle_freelist(s, slab); 2509 2510 if (!shuffle) { 2511 start = fixup_red_left(s, start); 2512 start = setup_object(s, start); 2513 slab->freelist = start; 2514 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 2515 next = p + s->size; 2516 next = setup_object(s, next); 2517 set_freepointer(s, p, next); 2518 p = next; 2519 } 2520 set_freepointer(s, p, NULL); 2521 } 2522 2523 return slab; 2524 } 2525 2526 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 2527 { 2528 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2529 flags = kmalloc_fix_flags(flags); 2530 2531 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2532 2533 return allocate_slab(s, 2534 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 2535 } 2536 2537 static void __free_slab(struct kmem_cache *s, struct slab *slab) 2538 { 2539 struct folio *folio = slab_folio(slab); 2540 int order = folio_order(folio); 2541 int pages = 1 << order; 2542 2543 __slab_clear_pfmemalloc(slab); 2544 folio->mapping = NULL; 2545 /* Make the mapping reset visible before clearing the flag */ 2546 smp_wmb(); 2547 __folio_clear_slab(folio); 2548 mm_account_reclaimed_pages(pages); 2549 unaccount_slab(slab, order, s); 2550 __free_pages(&folio->page, order); 2551 } 2552 2553 static void rcu_free_slab(struct rcu_head *h) 2554 { 2555 struct slab *slab = container_of(h, struct slab, rcu_head); 2556 2557 __free_slab(slab->slab_cache, slab); 2558 } 2559 2560 static void free_slab(struct kmem_cache *s, struct slab *slab) 2561 { 2562 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 2563 void *p; 2564 2565 slab_pad_check(s, slab); 2566 for_each_object(p, s, slab_address(slab), slab->objects) 2567 check_object(s, slab, p, SLUB_RED_INACTIVE); 2568 } 2569 2570 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) 2571 call_rcu(&slab->rcu_head, rcu_free_slab); 2572 else 2573 __free_slab(s, slab); 2574 } 2575 2576 static void discard_slab(struct kmem_cache *s, struct slab *slab) 2577 { 2578 dec_slabs_node(s, slab_nid(slab), slab->objects); 2579 free_slab(s, slab); 2580 } 2581 2582 /* 2583 * SLUB reuses PG_workingset bit to keep track of whether it's on 2584 * the per-node partial list. 2585 */ 2586 static inline bool slab_test_node_partial(const struct slab *slab) 2587 { 2588 return folio_test_workingset((struct folio *)slab_folio(slab)); 2589 } 2590 2591 static inline void slab_set_node_partial(struct slab *slab) 2592 { 2593 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2594 } 2595 2596 static inline void slab_clear_node_partial(struct slab *slab) 2597 { 2598 clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2599 } 2600 2601 /* 2602 * Management of partially allocated slabs. 
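 *
 * The helpers below keep n->nr_partial and the PG_workingset marker in
 * sync with the slab's presence on n->partial. add_partial() and
 * remove_partial() assert that n->list_lock is held; __add_partial() is
 * for the rare callers that cannot take it yet.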
2603 */ 2604 static inline void 2605 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 2606 { 2607 n->nr_partial++; 2608 if (tail == DEACTIVATE_TO_TAIL) 2609 list_add_tail(&slab->slab_list, &n->partial); 2610 else 2611 list_add(&slab->slab_list, &n->partial); 2612 slab_set_node_partial(slab); 2613 } 2614 2615 static inline void add_partial(struct kmem_cache_node *n, 2616 struct slab *slab, int tail) 2617 { 2618 lockdep_assert_held(&n->list_lock); 2619 __add_partial(n, slab, tail); 2620 } 2621 2622 static inline void remove_partial(struct kmem_cache_node *n, 2623 struct slab *slab) 2624 { 2625 lockdep_assert_held(&n->list_lock); 2626 list_del(&slab->slab_list); 2627 slab_clear_node_partial(slab); 2628 n->nr_partial--; 2629 } 2630 2631 /* 2632 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a 2633 * slab from the n->partial list. Remove only a single object from the slab, do 2634 * the alloc_debug_processing() checks and leave the slab on the list, or move 2635 * it to full list if it was the last free object. 2636 */ 2637 static void *alloc_single_from_partial(struct kmem_cache *s, 2638 struct kmem_cache_node *n, struct slab *slab, int orig_size) 2639 { 2640 void *object; 2641 2642 lockdep_assert_held(&n->list_lock); 2643 2644 object = slab->freelist; 2645 slab->freelist = get_freepointer(s, object); 2646 slab->inuse++; 2647 2648 if (!alloc_debug_processing(s, slab, object, orig_size)) { 2649 remove_partial(n, slab); 2650 return NULL; 2651 } 2652 2653 if (slab->inuse == slab->objects) { 2654 remove_partial(n, slab); 2655 add_full(s, n, slab); 2656 } 2657 2658 return object; 2659 } 2660 2661 /* 2662 * Called only for kmem_cache_debug() caches to allocate from a freshly 2663 * allocated slab. Allocate a single object instead of whole freelist 2664 * and put the slab to the partial (or full) list. 2665 */ 2666 static void *alloc_single_from_new_slab(struct kmem_cache *s, 2667 struct slab *slab, int orig_size) 2668 { 2669 int nid = slab_nid(slab); 2670 struct kmem_cache_node *n = get_node(s, nid); 2671 unsigned long flags; 2672 void *object; 2673 2674 2675 object = slab->freelist; 2676 slab->freelist = get_freepointer(s, object); 2677 slab->inuse = 1; 2678 2679 if (!alloc_debug_processing(s, slab, object, orig_size)) 2680 /* 2681 * It's not really expected that this would fail on a 2682 * freshly allocated slab, but a concurrent memory 2683 * corruption in theory could cause that. 2684 */ 2685 return NULL; 2686 2687 spin_lock_irqsave(&n->list_lock, flags); 2688 2689 if (slab->inuse == slab->objects) 2690 add_full(s, n, slab); 2691 else 2692 add_partial(n, slab, DEACTIVATE_TO_HEAD); 2693 2694 inc_slabs_node(s, nid, slab->objects); 2695 spin_unlock_irqrestore(&n->list_lock, flags); 2696 2697 return object; 2698 } 2699 2700 #ifdef CONFIG_SLUB_CPU_PARTIAL 2701 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 2702 #else 2703 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 2704 int drain) { } 2705 #endif 2706 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 2707 2708 /* 2709 * Try to allocate a partial slab from a specific node. 2710 */ 2711 static struct slab *get_partial_node(struct kmem_cache *s, 2712 struct kmem_cache_node *n, 2713 struct partial_context *pc) 2714 { 2715 struct slab *slab, *slab2, *partial = NULL; 2716 unsigned long flags; 2717 unsigned int partial_slabs = 0; 2718 2719 /* 2720 * Racy check. 
If we mistakenly see no partial slabs then we 2721 * just allocate an empty slab. If we mistakenly try to get a 2722 * partial slab and there is none available then get_partial() 2723 * will return NULL. 2724 */ 2725 if (!n || !n->nr_partial) 2726 return NULL; 2727 2728 spin_lock_irqsave(&n->list_lock, flags); 2729 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 2730 if (!pfmemalloc_match(slab, pc->flags)) 2731 continue; 2732 2733 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 2734 void *object = alloc_single_from_partial(s, n, slab, 2735 pc->orig_size); 2736 if (object) { 2737 partial = slab; 2738 pc->object = object; 2739 break; 2740 } 2741 continue; 2742 } 2743 2744 remove_partial(n, slab); 2745 2746 if (!partial) { 2747 partial = slab; 2748 stat(s, ALLOC_FROM_PARTIAL); 2749 } else { 2750 put_cpu_partial(s, slab, 0); 2751 stat(s, CPU_PARTIAL_NODE); 2752 partial_slabs++; 2753 } 2754 #ifdef CONFIG_SLUB_CPU_PARTIAL 2755 if (!kmem_cache_has_cpu_partial(s) 2756 || partial_slabs > s->cpu_partial_slabs / 2) 2757 break; 2758 #else 2759 break; 2760 #endif 2761 2762 } 2763 spin_unlock_irqrestore(&n->list_lock, flags); 2764 return partial; 2765 } 2766 2767 /* 2768 * Get a slab from somewhere. Search in increasing NUMA distances. 2769 */ 2770 static struct slab *get_any_partial(struct kmem_cache *s, 2771 struct partial_context *pc) 2772 { 2773 #ifdef CONFIG_NUMA 2774 struct zonelist *zonelist; 2775 struct zoneref *z; 2776 struct zone *zone; 2777 enum zone_type highest_zoneidx = gfp_zone(pc->flags); 2778 struct slab *slab; 2779 unsigned int cpuset_mems_cookie; 2780 2781 /* 2782 * The defrag ratio allows a configuration of the tradeoffs between 2783 * inter node defragmentation and node local allocations. A lower 2784 * defrag_ratio increases the tendency to do local allocations 2785 * instead of attempting to obtain partial slabs from other nodes. 2786 * 2787 * If the defrag_ratio is set to 0 then kmalloc() always 2788 * returns node local objects. If the ratio is higher then kmalloc() 2789 * may return off node objects because partial slabs are obtained 2790 * from other nodes and filled up. 2791 * 2792 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2793 * (which makes defrag_ratio = 1000) then every (well almost) 2794 * allocation will first attempt to defrag slab caches on other nodes. 2795 * This means scanning over all nodes to look for partial slabs which 2796 * may be expensive if we do it every time we are trying to find a slab 2797 * with available objects. 2798 */ 2799 if (!s->remote_node_defrag_ratio || 2800 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2801 return NULL; 2802 2803 do { 2804 cpuset_mems_cookie = read_mems_allowed_begin(); 2805 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); 2806 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2807 struct kmem_cache_node *n; 2808 2809 n = get_node(s, zone_to_nid(zone)); 2810 2811 if (n && cpuset_zone_allowed(zone, pc->flags) && 2812 n->nr_partial > s->min_partial) { 2813 slab = get_partial_node(s, n, pc); 2814 if (slab) { 2815 /* 2816 * Don't check read_mems_allowed_retry() 2817 * here - if mems_allowed was updated in 2818 * parallel, that was a harmless race 2819 * between allocation and the cpuset 2820 * update 2821 */ 2822 return slab; 2823 } 2824 } 2825 } 2826 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2827 #endif /* CONFIG_NUMA */ 2828 return NULL; 2829 } 2830 2831 /* 2832 * Get a partial slab, lock it and return it. 
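 *
 * The requested node (or the local node for NUMA_NO_NODE) is tried
 * first; other nodes are only considered, via get_any_partial(), when
 * the caller did not ask for a specific node.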
2833 */ 2834 static struct slab *get_partial(struct kmem_cache *s, int node, 2835 struct partial_context *pc) 2836 { 2837 struct slab *slab; 2838 int searchnode = node; 2839 2840 if (node == NUMA_NO_NODE) 2841 searchnode = numa_mem_id(); 2842 2843 slab = get_partial_node(s, get_node(s, searchnode), pc); 2844 if (slab || node != NUMA_NO_NODE) 2845 return slab; 2846 2847 return get_any_partial(s, pc); 2848 } 2849 2850 #ifndef CONFIG_SLUB_TINY 2851 2852 #ifdef CONFIG_PREEMPTION 2853 /* 2854 * Calculate the next globally unique transaction for disambiguation 2855 * during cmpxchg. The transactions start with the cpu number and are then 2856 * incremented by CONFIG_NR_CPUS. 2857 */ 2858 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2859 #else 2860 /* 2861 * No preemption supported therefore also no need to check for 2862 * different cpus. 2863 */ 2864 #define TID_STEP 1 2865 #endif /* CONFIG_PREEMPTION */ 2866 2867 static inline unsigned long next_tid(unsigned long tid) 2868 { 2869 return tid + TID_STEP; 2870 } 2871 2872 #ifdef SLUB_DEBUG_CMPXCHG 2873 static inline unsigned int tid_to_cpu(unsigned long tid) 2874 { 2875 return tid % TID_STEP; 2876 } 2877 2878 static inline unsigned long tid_to_event(unsigned long tid) 2879 { 2880 return tid / TID_STEP; 2881 } 2882 #endif 2883 2884 static inline unsigned int init_tid(int cpu) 2885 { 2886 return cpu; 2887 } 2888 2889 static inline void note_cmpxchg_failure(const char *n, 2890 const struct kmem_cache *s, unsigned long tid) 2891 { 2892 #ifdef SLUB_DEBUG_CMPXCHG 2893 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2894 2895 pr_info("%s %s: cmpxchg redo ", n, s->name); 2896 2897 #ifdef CONFIG_PREEMPTION 2898 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2899 pr_warn("due to cpu change %d -> %d\n", 2900 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2901 else 2902 #endif 2903 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2904 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2905 tid_to_event(tid), tid_to_event(actual_tid)); 2906 else 2907 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2908 actual_tid, tid, next_tid(tid)); 2909 #endif 2910 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2911 } 2912 2913 static void init_kmem_cache_cpus(struct kmem_cache *s) 2914 { 2915 int cpu; 2916 struct kmem_cache_cpu *c; 2917 2918 for_each_possible_cpu(cpu) { 2919 c = per_cpu_ptr(s->cpu_slab, cpu); 2920 local_lock_init(&c->lock); 2921 c->tid = init_tid(cpu); 2922 } 2923 } 2924 2925 /* 2926 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist, 2927 * unfreezes the slabs and puts it on the proper list. 2928 * Assumes the slab has been already safely taken away from kmem_cache_cpu 2929 * by the caller. 2930 */ 2931 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, 2932 void *freelist) 2933 { 2934 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 2935 int free_delta = 0; 2936 void *nextfree, *freelist_iter, *freelist_tail; 2937 int tail = DEACTIVATE_TO_HEAD; 2938 unsigned long flags = 0; 2939 struct slab new; 2940 struct slab old; 2941 2942 if (slab->freelist) { 2943 stat(s, DEACTIVATE_REMOTE_FREES); 2944 tail = DEACTIVATE_TO_TAIL; 2945 } 2946 2947 /* 2948 * Stage one: Count the objects on cpu's freelist as free_delta and 2949 * remember the last object in freelist_tail for later splicing. 
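 * No list_lock is needed for this walk: the freelist was detached from
 * kmem_cache_cpu by the caller, so it is private to us, while remote
 * frees go to slab->freelist instead.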
2950 */ 2951 freelist_tail = NULL; 2952 freelist_iter = freelist; 2953 while (freelist_iter) { 2954 nextfree = get_freepointer(s, freelist_iter); 2955 2956 /* 2957 * If 'nextfree' is invalid, it is possible that the object at 2958 * 'freelist_iter' is already corrupted. So isolate all objects 2959 * starting at 'freelist_iter' by skipping them. 2960 */ 2961 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) 2962 break; 2963 2964 freelist_tail = freelist_iter; 2965 free_delta++; 2966 2967 freelist_iter = nextfree; 2968 } 2969 2970 /* 2971 * Stage two: Unfreeze the slab while splicing the per-cpu 2972 * freelist to the head of slab's freelist. 2973 */ 2974 do { 2975 old.freelist = READ_ONCE(slab->freelist); 2976 old.counters = READ_ONCE(slab->counters); 2977 VM_BUG_ON(!old.frozen); 2978 2979 /* Determine target state of the slab */ 2980 new.counters = old.counters; 2981 new.frozen = 0; 2982 if (freelist_tail) { 2983 new.inuse -= free_delta; 2984 set_freepointer(s, freelist_tail, old.freelist); 2985 new.freelist = freelist; 2986 } else { 2987 new.freelist = old.freelist; 2988 } 2989 } while (!slab_update_freelist(s, slab, 2990 old.freelist, old.counters, 2991 new.freelist, new.counters, 2992 "unfreezing slab")); 2993 2994 /* 2995 * Stage three: Manipulate the slab list based on the updated state. 2996 */ 2997 if (!new.inuse && n->nr_partial >= s->min_partial) { 2998 stat(s, DEACTIVATE_EMPTY); 2999 discard_slab(s, slab); 3000 stat(s, FREE_SLAB); 3001 } else if (new.freelist) { 3002 spin_lock_irqsave(&n->list_lock, flags); 3003 add_partial(n, slab, tail); 3004 spin_unlock_irqrestore(&n->list_lock, flags); 3005 stat(s, tail); 3006 } else { 3007 stat(s, DEACTIVATE_FULL); 3008 } 3009 } 3010 3011 #ifdef CONFIG_SLUB_CPU_PARTIAL 3012 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) 3013 { 3014 struct kmem_cache_node *n = NULL, *n2 = NULL; 3015 struct slab *slab, *slab_to_discard = NULL; 3016 unsigned long flags = 0; 3017 3018 while (partial_slab) { 3019 slab = partial_slab; 3020 partial_slab = slab->next; 3021 3022 n2 = get_node(s, slab_nid(slab)); 3023 if (n != n2) { 3024 if (n) 3025 spin_unlock_irqrestore(&n->list_lock, flags); 3026 3027 n = n2; 3028 spin_lock_irqsave(&n->list_lock, flags); 3029 } 3030 3031 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { 3032 slab->next = slab_to_discard; 3033 slab_to_discard = slab; 3034 } else { 3035 add_partial(n, slab, DEACTIVATE_TO_TAIL); 3036 stat(s, FREE_ADD_PARTIAL); 3037 } 3038 } 3039 3040 if (n) 3041 spin_unlock_irqrestore(&n->list_lock, flags); 3042 3043 while (slab_to_discard) { 3044 slab = slab_to_discard; 3045 slab_to_discard = slab_to_discard->next; 3046 3047 stat(s, DEACTIVATE_EMPTY); 3048 discard_slab(s, slab); 3049 stat(s, FREE_SLAB); 3050 } 3051 } 3052 3053 /* 3054 * Put all the cpu partial slabs to the node partial list. 
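 *
 * The percpu partial list is detached under the local lock and then
 * handed to __put_partials(), which takes the node list_lock (and frees
 * any empty slabs) outside of the local lock section.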
3055 */ 3056 static void put_partials(struct kmem_cache *s) 3057 { 3058 struct slab *partial_slab; 3059 unsigned long flags; 3060 3061 local_lock_irqsave(&s->cpu_slab->lock, flags); 3062 partial_slab = this_cpu_read(s->cpu_slab->partial); 3063 this_cpu_write(s->cpu_slab->partial, NULL); 3064 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3065 3066 if (partial_slab) 3067 __put_partials(s, partial_slab); 3068 } 3069 3070 static void put_partials_cpu(struct kmem_cache *s, 3071 struct kmem_cache_cpu *c) 3072 { 3073 struct slab *partial_slab; 3074 3075 partial_slab = slub_percpu_partial(c); 3076 c->partial = NULL; 3077 3078 if (partial_slab) 3079 __put_partials(s, partial_slab); 3080 } 3081 3082 /* 3083 * Put a slab into a partial slab slot if available. 3084 * 3085 * If we did not find a slot then simply move all the partials to the 3086 * per node partial list. 3087 */ 3088 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 3089 { 3090 struct slab *oldslab; 3091 struct slab *slab_to_put = NULL; 3092 unsigned long flags; 3093 int slabs = 0; 3094 3095 local_lock_irqsave(&s->cpu_slab->lock, flags); 3096 3097 oldslab = this_cpu_read(s->cpu_slab->partial); 3098 3099 if (oldslab) { 3100 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 3101 /* 3102 * Partial array is full. Move the existing set to the 3103 * per node partial list. Postpone the actual unfreezing 3104 * outside of the critical section. 3105 */ 3106 slab_to_put = oldslab; 3107 oldslab = NULL; 3108 } else { 3109 slabs = oldslab->slabs; 3110 } 3111 } 3112 3113 slabs++; 3114 3115 slab->slabs = slabs; 3116 slab->next = oldslab; 3117 3118 this_cpu_write(s->cpu_slab->partial, slab); 3119 3120 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3121 3122 if (slab_to_put) { 3123 __put_partials(s, slab_to_put); 3124 stat(s, CPU_PARTIAL_DRAIN); 3125 } 3126 } 3127 3128 #else /* CONFIG_SLUB_CPU_PARTIAL */ 3129 3130 static inline void put_partials(struct kmem_cache *s) { } 3131 static inline void put_partials_cpu(struct kmem_cache *s, 3132 struct kmem_cache_cpu *c) { } 3133 3134 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 3135 3136 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 3137 { 3138 unsigned long flags; 3139 struct slab *slab; 3140 void *freelist; 3141 3142 local_lock_irqsave(&s->cpu_slab->lock, flags); 3143 3144 slab = c->slab; 3145 freelist = c->freelist; 3146 3147 c->slab = NULL; 3148 c->freelist = NULL; 3149 c->tid = next_tid(c->tid); 3150 3151 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3152 3153 if (slab) { 3154 deactivate_slab(s, slab, freelist); 3155 stat(s, CPUSLAB_FLUSH); 3156 } 3157 } 3158 3159 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 3160 { 3161 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3162 void *freelist = c->freelist; 3163 struct slab *slab = c->slab; 3164 3165 c->slab = NULL; 3166 c->freelist = NULL; 3167 c->tid = next_tid(c->tid); 3168 3169 if (slab) { 3170 deactivate_slab(s, slab, freelist); 3171 stat(s, CPUSLAB_FLUSH); 3172 } 3173 3174 put_partials_cpu(s, c); 3175 } 3176 3177 struct slub_flush_work { 3178 struct work_struct work; 3179 struct kmem_cache *s; 3180 bool skip; 3181 }; 3182 3183 /* 3184 * Flush cpu slab. 3185 * 3186 * Called from CPU work handler with migration disabled. 
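 * Deactivates the current cpu slab and moves any percpu partial slabs
 * back to the node partial lists, so this cpu holds no cached slabs
 * afterwards.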
3187 */ 3188 static void flush_cpu_slab(struct work_struct *w) 3189 { 3190 struct kmem_cache *s; 3191 struct kmem_cache_cpu *c; 3192 struct slub_flush_work *sfw; 3193 3194 sfw = container_of(w, struct slub_flush_work, work); 3195 3196 s = sfw->s; 3197 c = this_cpu_ptr(s->cpu_slab); 3198 3199 if (c->slab) 3200 flush_slab(s, c); 3201 3202 put_partials(s); 3203 } 3204 3205 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 3206 { 3207 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3208 3209 return c->slab || slub_percpu_partial(c); 3210 } 3211 3212 static DEFINE_MUTEX(flush_lock); 3213 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush); 3214 3215 static void flush_all_cpus_locked(struct kmem_cache *s) 3216 { 3217 struct slub_flush_work *sfw; 3218 unsigned int cpu; 3219 3220 lockdep_assert_cpus_held(); 3221 mutex_lock(&flush_lock); 3222 3223 for_each_online_cpu(cpu) { 3224 sfw = &per_cpu(slub_flush, cpu); 3225 if (!has_cpu_slab(cpu, s)) { 3226 sfw->skip = true; 3227 continue; 3228 } 3229 INIT_WORK(&sfw->work, flush_cpu_slab); 3230 sfw->skip = false; 3231 sfw->s = s; 3232 queue_work_on(cpu, flushwq, &sfw->work); 3233 } 3234 3235 for_each_online_cpu(cpu) { 3236 sfw = &per_cpu(slub_flush, cpu); 3237 if (sfw->skip) 3238 continue; 3239 flush_work(&sfw->work); 3240 } 3241 3242 mutex_unlock(&flush_lock); 3243 } 3244 3245 static void flush_all(struct kmem_cache *s) 3246 { 3247 cpus_read_lock(); 3248 flush_all_cpus_locked(s); 3249 cpus_read_unlock(); 3250 } 3251 3252 /* 3253 * Use the cpu notifier to ensure that the cpu slabs are flushed when 3254 * necessary. 3255 */ 3256 static int slub_cpu_dead(unsigned int cpu) 3257 { 3258 struct kmem_cache *s; 3259 3260 mutex_lock(&slab_mutex); 3261 list_for_each_entry(s, &slab_caches, list) 3262 __flush_cpu_slab(s, cpu); 3263 mutex_unlock(&slab_mutex); 3264 return 0; 3265 } 3266 3267 #else /* CONFIG_SLUB_TINY */ 3268 static inline void flush_all_cpus_locked(struct kmem_cache *s) { } 3269 static inline void flush_all(struct kmem_cache *s) { } 3270 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } 3271 static inline int slub_cpu_dead(unsigned int cpu) { return 0; } 3272 #endif /* CONFIG_SLUB_TINY */ 3273 3274 /* 3275 * Check if the objects in a per cpu structure fit numa 3276 * locality expectations.
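 * NUMA_NO_NODE matches any slab; without CONFIG_NUMA everything matches.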
3277 */ 3278 static inline int node_match(struct slab *slab, int node) 3279 { 3280 #ifdef CONFIG_NUMA 3281 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 3282 return 0; 3283 #endif 3284 return 1; 3285 } 3286 3287 #ifdef CONFIG_SLUB_DEBUG 3288 static int count_free(struct slab *slab) 3289 { 3290 return slab->objects - slab->inuse; 3291 } 3292 3293 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 3294 { 3295 return atomic_long_read(&n->total_objects); 3296 } 3297 3298 /* Supports checking bulk free of a constructed freelist */ 3299 static inline bool free_debug_processing(struct kmem_cache *s, 3300 struct slab *slab, void *head, void *tail, int *bulk_cnt, 3301 unsigned long addr, depot_stack_handle_t handle) 3302 { 3303 bool checks_ok = false; 3304 void *object = head; 3305 int cnt = 0; 3306 3307 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3308 if (!check_slab(s, slab)) 3309 goto out; 3310 } 3311 3312 if (slab->inuse < *bulk_cnt) { 3313 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", 3314 slab->inuse, *bulk_cnt); 3315 goto out; 3316 } 3317 3318 next_object: 3319 3320 if (++cnt > *bulk_cnt) 3321 goto out_cnt; 3322 3323 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3324 if (!free_consistency_checks(s, slab, object, addr)) 3325 goto out; 3326 } 3327 3328 if (s->flags & SLAB_STORE_USER) 3329 set_track_update(s, object, TRACK_FREE, addr, handle); 3330 trace(s, slab, object, 0); 3331 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 3332 init_object(s, object, SLUB_RED_INACTIVE); 3333 3334 /* Reached end of constructed freelist yet? */ 3335 if (object != tail) { 3336 object = get_freepointer(s, object); 3337 goto next_object; 3338 } 3339 checks_ok = true; 3340 3341 out_cnt: 3342 if (cnt != *bulk_cnt) { 3343 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", 3344 *bulk_cnt, cnt); 3345 *bulk_cnt = cnt; 3346 } 3347 3348 out: 3349 3350 if (!checks_ok) 3351 slab_fix(s, "Object at 0x%p not freed", object); 3352 3353 return checks_ok; 3354 } 3355 #endif /* CONFIG_SLUB_DEBUG */ 3356 3357 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) 3358 static unsigned long count_partial(struct kmem_cache_node *n, 3359 int (*get_count)(struct slab *)) 3360 { 3361 unsigned long flags; 3362 unsigned long x = 0; 3363 struct slab *slab; 3364 3365 spin_lock_irqsave(&n->list_lock, flags); 3366 list_for_each_entry(slab, &n->partial, slab_list) 3367 x += get_count(slab); 3368 spin_unlock_irqrestore(&n->list_lock, flags); 3369 return x; 3370 } 3371 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ 3372 3373 #ifdef CONFIG_SLUB_DEBUG 3374 static noinline void 3375 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 3376 { 3377 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 3378 DEFAULT_RATELIMIT_BURST); 3379 int node; 3380 struct kmem_cache_node *n; 3381 3382 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 3383 return; 3384 3385 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 3386 nid, gfpflags, &gfpflags); 3387 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 3388 s->name, s->object_size, s->size, oo_order(s->oo), 3389 oo_order(s->min)); 3390 3391 if (oo_order(s->min) > get_order(s->object_size)) 3392 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n", 3393 s->name); 3394 3395 for_each_kmem_cache_node(s, node, n) { 3396 unsigned long nr_slabs; 3397 unsigned long nr_objs; 3398 unsigned 
long nr_free; 3399 3400 nr_free = count_partial(n, count_free); 3401 nr_slabs = node_nr_slabs(n); 3402 nr_objs = node_nr_objs(n); 3403 3404 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 3405 node, nr_slabs, nr_objs, nr_free); 3406 } 3407 } 3408 #else /* CONFIG_SLUB_DEBUG */ 3409 static inline void 3410 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } 3411 #endif 3412 3413 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) 3414 { 3415 if (unlikely(slab_test_pfmemalloc(slab))) 3416 return gfp_pfmemalloc_allowed(gfpflags); 3417 3418 return true; 3419 } 3420 3421 #ifndef CONFIG_SLUB_TINY 3422 static inline bool 3423 __update_cpu_freelist_fast(struct kmem_cache *s, 3424 void *freelist_old, void *freelist_new, 3425 unsigned long tid) 3426 { 3427 freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; 3428 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; 3429 3430 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, 3431 &old.full, new.full); 3432 } 3433 3434 /* 3435 * Check the slab->freelist and either transfer the freelist to the 3436 * per cpu freelist or deactivate the slab. 3437 * 3438 * The slab is still frozen if the return value is not NULL. 3439 * 3440 * If this function returns NULL then the slab has been unfrozen. 3441 */ 3442 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 3443 { 3444 struct slab new; 3445 unsigned long counters; 3446 void *freelist; 3447 3448 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3449 3450 do { 3451 freelist = slab->freelist; 3452 counters = slab->counters; 3453 3454 new.counters = counters; 3455 3456 new.inuse = slab->objects; 3457 new.frozen = freelist != NULL; 3458 3459 } while (!__slab_update_freelist(s, slab, 3460 freelist, counters, 3461 NULL, new.counters, 3462 "get_freelist")); 3463 3464 return freelist; 3465 } 3466 3467 /* 3468 * Freeze the partial slab and return the pointer to the freelist. 3469 */ 3470 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) 3471 { 3472 struct slab new; 3473 unsigned long counters; 3474 void *freelist; 3475 3476 do { 3477 freelist = slab->freelist; 3478 counters = slab->counters; 3479 3480 new.counters = counters; 3481 VM_BUG_ON(new.frozen); 3482 3483 new.inuse = slab->objects; 3484 new.frozen = 1; 3485 3486 } while (!slab_update_freelist(s, slab, 3487 freelist, counters, 3488 NULL, new.counters, 3489 "freeze_slab")); 3490 3491 return freelist; 3492 } 3493 3494 /* 3495 * Slow path. The lockless freelist is empty or we need to perform 3496 * debugging duties. 3497 * 3498 * Processing is still very fast if new objects have been freed to the 3499 * regular freelist. In that case we simply take over the regular freelist 3500 * as the lockless freelist and zap the regular freelist. 3501 * 3502 * If that is not working then we fall back to the partial lists. We take the 3503 * first element of the freelist as the object to allocate now and move the 3504 * rest of the freelist to the lockless freelist. 3505 * 3506 * And if we were unable to get a new slab from the partial slab lists then 3507 * we need to allocate a new slab. This is the slowest path since it involves 3508 * a call to the page allocator and the setup of a new slab. 3509 * 3510 * Version of __slab_alloc to use when we know that preemption is 3511 * already disabled (which is the case for bulk allocation). 
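 *
 * Note that the kmem_cache_cpu pointer is only stable while preemption
 * stays disabled; around the call to new_slab() it is dropped and
 * re-fetched via slub_put_cpu_ptr()/slub_get_cpu_ptr(), and c->slab is
 * re-checked under the local lock after such a window.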
3512 */ 3513 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3514 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3515 { 3516 void *freelist; 3517 struct slab *slab; 3518 unsigned long flags; 3519 struct partial_context pc; 3520 3521 stat(s, ALLOC_SLOWPATH); 3522 3523 reread_slab: 3524 3525 slab = READ_ONCE(c->slab); 3526 if (!slab) { 3527 /* 3528 * if the node is not online or has no normal memory, just 3529 * ignore the node constraint 3530 */ 3531 if (unlikely(node != NUMA_NO_NODE && 3532 !node_isset(node, slab_nodes))) 3533 node = NUMA_NO_NODE; 3534 goto new_slab; 3535 } 3536 3537 if (unlikely(!node_match(slab, node))) { 3538 /* 3539 * same as above but node_match() being false already 3540 * implies node != NUMA_NO_NODE 3541 */ 3542 if (!node_isset(node, slab_nodes)) { 3543 node = NUMA_NO_NODE; 3544 } else { 3545 stat(s, ALLOC_NODE_MISMATCH); 3546 goto deactivate_slab; 3547 } 3548 } 3549 3550 /* 3551 * By rights, we should be searching for a slab page that was 3552 * PFMEMALLOC but right now, we are losing the pfmemalloc 3553 * information when the page leaves the per-cpu allocator 3554 */ 3555 if (unlikely(!pfmemalloc_match(slab, gfpflags))) 3556 goto deactivate_slab; 3557 3558 /* must check again c->slab in case we got preempted and it changed */ 3559 local_lock_irqsave(&s->cpu_slab->lock, flags); 3560 if (unlikely(slab != c->slab)) { 3561 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3562 goto reread_slab; 3563 } 3564 freelist = c->freelist; 3565 if (freelist) 3566 goto load_freelist; 3567 3568 freelist = get_freelist(s, slab); 3569 3570 if (!freelist) { 3571 c->slab = NULL; 3572 c->tid = next_tid(c->tid); 3573 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3574 stat(s, DEACTIVATE_BYPASS); 3575 goto new_slab; 3576 } 3577 3578 stat(s, ALLOC_REFILL); 3579 3580 load_freelist: 3581 3582 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3583 3584 /* 3585 * freelist is pointing to the list of objects to be used. 3586 * slab is pointing to the slab from which the objects are obtained. 3587 * That slab must be frozen for per cpu allocations to work. 
3588 */ 3589 VM_BUG_ON(!c->slab->frozen); 3590 c->freelist = get_freepointer(s, freelist); 3591 c->tid = next_tid(c->tid); 3592 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3593 return freelist; 3594 3595 deactivate_slab: 3596 3597 local_lock_irqsave(&s->cpu_slab->lock, flags); 3598 if (slab != c->slab) { 3599 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3600 goto reread_slab; 3601 } 3602 freelist = c->freelist; 3603 c->slab = NULL; 3604 c->freelist = NULL; 3605 c->tid = next_tid(c->tid); 3606 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3607 deactivate_slab(s, slab, freelist); 3608 3609 new_slab: 3610 3611 #ifdef CONFIG_SLUB_CPU_PARTIAL 3612 while (slub_percpu_partial(c)) { 3613 local_lock_irqsave(&s->cpu_slab->lock, flags); 3614 if (unlikely(c->slab)) { 3615 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3616 goto reread_slab; 3617 } 3618 if (unlikely(!slub_percpu_partial(c))) { 3619 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3620 /* we were preempted and partial list got empty */ 3621 goto new_objects; 3622 } 3623 3624 slab = slub_percpu_partial(c); 3625 slub_set_percpu_partial(c, slab); 3626 3627 if (likely(node_match(slab, node) && 3628 pfmemalloc_match(slab, gfpflags))) { 3629 c->slab = slab; 3630 freelist = get_freelist(s, slab); 3631 VM_BUG_ON(!freelist); 3632 stat(s, CPU_PARTIAL_ALLOC); 3633 goto load_freelist; 3634 } 3635 3636 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3637 3638 slab->next = NULL; 3639 __put_partials(s, slab); 3640 } 3641 #endif 3642 3643 new_objects: 3644 3645 pc.flags = gfpflags; 3646 pc.orig_size = orig_size; 3647 slab = get_partial(s, node, &pc); 3648 if (slab) { 3649 if (kmem_cache_debug(s)) { 3650 freelist = pc.object; 3651 /* 3652 * For debug caches here we had to go through 3653 * alloc_single_from_partial() so just store the 3654 * tracking info and return the object. 3655 */ 3656 if (s->flags & SLAB_STORE_USER) 3657 set_track(s, freelist, TRACK_ALLOC, addr); 3658 3659 return freelist; 3660 } 3661 3662 freelist = freeze_slab(s, slab); 3663 goto retry_load_slab; 3664 } 3665 3666 slub_put_cpu_ptr(s->cpu_slab); 3667 slab = new_slab(s, gfpflags, node); 3668 c = slub_get_cpu_ptr(s->cpu_slab); 3669 3670 if (unlikely(!slab)) { 3671 slab_out_of_memory(s, gfpflags, node); 3672 return NULL; 3673 } 3674 3675 stat(s, ALLOC_SLAB); 3676 3677 if (kmem_cache_debug(s)) { 3678 freelist = alloc_single_from_new_slab(s, slab, orig_size); 3679 3680 if (unlikely(!freelist)) 3681 goto new_objects; 3682 3683 if (s->flags & SLAB_STORE_USER) 3684 set_track(s, freelist, TRACK_ALLOC, addr); 3685 3686 return freelist; 3687 } 3688 3689 /* 3690 * No other reference to the slab yet so we can 3691 * muck around with it freely without cmpxchg 3692 */ 3693 freelist = slab->freelist; 3694 slab->freelist = NULL; 3695 slab->inuse = slab->objects; 3696 slab->frozen = 1; 3697 3698 inc_slabs_node(s, slab_nid(slab), slab->objects); 3699 3700 if (unlikely(!pfmemalloc_match(slab, gfpflags))) { 3701 /* 3702 * For !pfmemalloc_match() case we don't load freelist so that 3703 * we don't make further mismatched allocations easier. 
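 * Only the first object is returned; the remainder of the new slab's
 * freelist is handed straight to deactivate_slab() below.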
3704 */ 3705 deactivate_slab(s, slab, get_freepointer(s, freelist)); 3706 return freelist; 3707 } 3708 3709 retry_load_slab: 3710 3711 local_lock_irqsave(&s->cpu_slab->lock, flags); 3712 if (unlikely(c->slab)) { 3713 void *flush_freelist = c->freelist; 3714 struct slab *flush_slab = c->slab; 3715 3716 c->slab = NULL; 3717 c->freelist = NULL; 3718 c->tid = next_tid(c->tid); 3719 3720 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3721 3722 deactivate_slab(s, flush_slab, flush_freelist); 3723 3724 stat(s, CPUSLAB_FLUSH); 3725 3726 goto retry_load_slab; 3727 } 3728 c->slab = slab; 3729 3730 goto load_freelist; 3731 } 3732 3733 /* 3734 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 3735 * disabled. Compensates for possible cpu changes by refetching the per cpu area 3736 * pointer. 3737 */ 3738 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3739 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3740 { 3741 void *p; 3742 3743 #ifdef CONFIG_PREEMPT_COUNT 3744 /* 3745 * We may have been preempted and rescheduled on a different 3746 * cpu before disabling preemption. Need to reload cpu area 3747 * pointer. 3748 */ 3749 c = slub_get_cpu_ptr(s->cpu_slab); 3750 #endif 3751 3752 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); 3753 #ifdef CONFIG_PREEMPT_COUNT 3754 slub_put_cpu_ptr(s->cpu_slab); 3755 #endif 3756 return p; 3757 } 3758 3759 static __always_inline void *__slab_alloc_node(struct kmem_cache *s, 3760 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3761 { 3762 struct kmem_cache_cpu *c; 3763 struct slab *slab; 3764 unsigned long tid; 3765 void *object; 3766 3767 redo: 3768 /* 3769 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 3770 * enabled. We may switch back and forth between cpus while 3771 * reading from one cpu area. That does not matter as long 3772 * as we end up on the original cpu again when doing the cmpxchg. 3773 * 3774 * We must guarantee that tid and kmem_cache_cpu are retrieved on the 3775 * same cpu. We first read the kmem_cache_cpu pointer and use it to read 3776 * the tid. If we are preempted and switched to another cpu between the 3777 * two reads, it's OK as the two are still associated with the same cpu 3778 * and cmpxchg later will validate the cpu. 3779 */ 3780 c = raw_cpu_ptr(s->cpu_slab); 3781 tid = READ_ONCE(c->tid); 3782 3783 /* 3784 * Irqless object alloc/free algorithm used here depends on sequence 3785 * of fetching cpu_slab's data. tid should be fetched before anything 3786 * on c to guarantee that object and slab associated with previous tid 3787 * won't be used with current tid. If we fetch tid first, object and 3788 * slab could be one associated with next tid and our alloc/free 3789 * request will fail. In this case, we will retry. So, no problem. 3790 */ 3791 barrier(); 3792 3793 /* 3794 * The transaction ids are globally unique per cpu and per operation on 3795 * a per cpu queue. Thus they guarantee that the cmpxchg_double 3796 * occurs on the right processor and that there was no operation on the 3797 * linked list in between.
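 *
 * As a rough illustration, with a TID_STEP of 256 the tids generated on
 * cpu 3 are 3, 259, 515, ...: the low bits identify the cpu
 * (tid_to_cpu()) and the high bits count operations (tid_to_event()),
 * so a stale tid from another cpu or an earlier operation will not
 * match in the cmpxchg.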
3798 */ 3799 3800 object = c->freelist; 3801 slab = c->slab; 3802 3803 if (!USE_LOCKLESS_FAST_PATH() || 3804 unlikely(!object || !slab || !node_match(slab, node))) { 3805 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); 3806 } else { 3807 void *next_object = get_freepointer_safe(s, object); 3808 3809 /* 3810 * The cmpxchg will only match if there was no additional 3811 * operation and if we are on the right processor. 3812 * 3813 * The cmpxchg does the following atomically (without lock 3814 * semantics!) 3815 * 1. Relocate first pointer to the current per cpu area. 3816 * 2. Verify that tid and freelist have not been changed 3817 * 3. If they were not changed replace tid and freelist 3818 * 3819 * Since this is without lock semantics the protection is only 3820 * against code executing on this cpu *not* from access by 3821 * other cpus. 3822 */ 3823 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { 3824 note_cmpxchg_failure("slab_alloc", s, tid); 3825 goto redo; 3826 } 3827 prefetch_freepointer(s, next_object); 3828 stat(s, ALLOC_FASTPATH); 3829 } 3830 3831 return object; 3832 } 3833 #else /* CONFIG_SLUB_TINY */ 3834 static void *__slab_alloc_node(struct kmem_cache *s, 3835 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3836 { 3837 struct partial_context pc; 3838 struct slab *slab; 3839 void *object; 3840 3841 pc.flags = gfpflags; 3842 pc.orig_size = orig_size; 3843 slab = get_partial(s, node, &pc); 3844 3845 if (slab) 3846 return pc.object; 3847 3848 slab = new_slab(s, gfpflags, node); 3849 if (unlikely(!slab)) { 3850 slab_out_of_memory(s, gfpflags, node); 3851 return NULL; 3852 } 3853 3854 object = alloc_single_from_new_slab(s, slab, orig_size); 3855 3856 return object; 3857 } 3858 #endif /* CONFIG_SLUB_TINY */ 3859 3860 /* 3861 * If the object has been wiped upon free, make sure it's fully initialized by 3862 * zeroing out freelist pointer. 3863 */ 3864 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 3865 void *obj) 3866 { 3867 if (unlikely(slab_want_init_on_free(s)) && obj) 3868 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 3869 0, sizeof(void *)); 3870 } 3871 3872 noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags) 3873 { 3874 if (__should_failslab(s, gfpflags)) 3875 return -ENOMEM; 3876 return 0; 3877 } 3878 ALLOW_ERROR_INJECTION(should_failslab, ERRNO); 3879 3880 static __fastpath_inline 3881 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, 3882 struct list_lru *lru, 3883 struct obj_cgroup **objcgp, 3884 size_t size, gfp_t flags) 3885 { 3886 flags &= gfp_allowed_mask; 3887 3888 might_alloc(flags); 3889 3890 if (unlikely(should_failslab(s, flags))) 3891 return NULL; 3892 3893 if (unlikely(!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))) 3894 return NULL; 3895 3896 return s; 3897 } 3898 3899 static __fastpath_inline 3900 void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, 3901 gfp_t flags, size_t size, void **p, bool init, 3902 unsigned int orig_size) 3903 { 3904 unsigned int zero_size = s->object_size; 3905 struct slabobj_ext *obj_exts; 3906 bool kasan_init = init; 3907 size_t i; 3908 gfp_t init_flags = flags & gfp_allowed_mask; 3909 3910 /* 3911 * For kmalloc object, the allocated memory size(object_size) is likely 3912 * larger than the requested size(orig_size). If redzone check is 3913 * enabled for the extra space, don't zero it, as it will be redzoned 3914 * soon. 
The redzone operation for this extra space could be seen as a 3915 * replacement of current poisoning under certain debug option, and 3916 * won't break other sanity checks. 3917 */ 3918 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) && 3919 (s->flags & SLAB_KMALLOC)) 3920 zero_size = orig_size; 3921 3922 /* 3923 * When slab_debug is enabled, avoid memory initialization integrated 3924 * into KASAN and instead zero out the memory via the memset below with 3925 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and 3926 * cause false-positive reports. This does not lead to a performance 3927 * penalty on production builds, as slab_debug is not intended to be 3928 * enabled there. 3929 */ 3930 if (__slub_debug_enabled()) 3931 kasan_init = false; 3932 3933 /* 3934 * As memory initialization might be integrated into KASAN, 3935 * kasan_slab_alloc and initialization memset must be 3936 * kept together to avoid discrepancies in behavior. 3937 * 3938 * As p[i] might get tagged, memset and kmemleak hook come after KASAN. 3939 */ 3940 for (i = 0; i < size; i++) { 3941 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init); 3942 if (p[i] && init && (!kasan_init || 3943 !kasan_has_integrated_init())) 3944 memset(p[i], 0, zero_size); 3945 kmemleak_alloc_recursive(p[i], s->object_size, 1, 3946 s->flags, init_flags); 3947 kmsan_slab_alloc(s, p[i], init_flags); 3948 if (need_slab_obj_ext()) { 3949 obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]); 3950 #ifdef CONFIG_MEM_ALLOC_PROFILING 3951 /* 3952 * Currently obj_exts is used only for allocation profiling. 3953 * If other users appear then mem_alloc_profiling_enabled() 3954 * check should be added before alloc_tag_add(). 3955 */ 3956 if (likely(obj_exts)) 3957 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); 3958 #endif 3959 } 3960 } 3961 3962 memcg_slab_post_alloc_hook(s, objcg, flags, size, p); 3963 } 3964 3965 /* 3966 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 3967 * have the fastpath folded into their functions. So no function call 3968 * overhead for requests that can be satisfied on the fastpath. 3969 * 3970 * The fastpath works by first checking if the lockless freelist can be used. 3971 * If not then __slab_alloc is called for slow processing. 3972 * 3973 * Otherwise we can simply pick the next object from the lockless free list. 
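 *
 * A sketch of the resulting call chain for the common hit case (hooks
 * abbreviated, names as used below):
 *
 *	kmem_cache_alloc(s, GFP_KERNEL)
 *	    slab_alloc_node()        -- pre/post hooks, kfence, init-on-alloc
 *	        __slab_alloc_node()  -- lockless c->freelist + tid cmpxchg
 *
 * Only on a miss (empty percpu freelist, node mismatch or a debug cache)
 * does __slab_alloc() and the slow path get involved.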
3974 */ 3975 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, 3976 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3977 { 3978 void *object; 3979 struct obj_cgroup *objcg = NULL; 3980 bool init = false; 3981 3982 s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags); 3983 if (unlikely(!s)) 3984 return NULL; 3985 3986 object = kfence_alloc(s, orig_size, gfpflags); 3987 if (unlikely(object)) 3988 goto out; 3989 3990 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); 3991 3992 maybe_wipe_obj_freeptr(s, object); 3993 init = slab_want_init_on_alloc(gfpflags, s); 3994 3995 out: 3996 /* 3997 * When init equals 'true', like for kzalloc() family, only 3998 * @orig_size bytes might be zeroed instead of s->object_size 3999 */ 4000 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size); 4001 4002 return object; 4003 } 4004 4005 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 4006 { 4007 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, 4008 s->object_size); 4009 4010 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4011 4012 return ret; 4013 } 4014 EXPORT_SYMBOL(kmem_cache_alloc); 4015 4016 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, 4017 gfp_t gfpflags) 4018 { 4019 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, 4020 s->object_size); 4021 4022 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4023 4024 return ret; 4025 } 4026 EXPORT_SYMBOL(kmem_cache_alloc_lru); 4027 4028 /** 4029 * kmem_cache_alloc_node - Allocate an object on the specified node 4030 * @s: The cache to allocate from. 4031 * @gfpflags: See kmalloc(). 4032 * @node: node number of the target node. 4033 * 4034 * Identical to kmem_cache_alloc but it will allocate memory on the given 4035 * node, which can improve the performance for cpu bound structures. 4036 * 4037 * Fallback to other node is possible if __GFP_THISNODE is not set. 4038 * 4039 * Return: pointer to the new object or %NULL in case of error 4040 */ 4041 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 4042 { 4043 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); 4044 4045 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); 4046 4047 return ret; 4048 } 4049 EXPORT_SYMBOL(kmem_cache_alloc_node); 4050 4051 /* 4052 * To avoid unnecessary overhead, we pass through large allocation requests 4053 * directly to the page allocator. We use __GFP_COMP, because we will need to 4054 * know the allocation order to free the pages properly in kfree. 4055 */ 4056 static void *__kmalloc_large_node(size_t size, gfp_t flags, int node) 4057 { 4058 struct folio *folio; 4059 void *ptr = NULL; 4060 unsigned int order = get_order(size); 4061 4062 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 4063 flags = kmalloc_fix_flags(flags); 4064 4065 flags |= __GFP_COMP; 4066 folio = (struct folio *)alloc_pages_node(node, flags, order); 4067 if (folio) { 4068 ptr = folio_address(folio); 4069 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4070 PAGE_SIZE << order); 4071 } 4072 4073 ptr = kasan_kmalloc_large(ptr, size, flags); 4074 /* As ptr might get tagged, call kmemleak hook after KASAN. 
*/ 4075 kmemleak_alloc(ptr, size, 1, flags); 4076 kmsan_kmalloc_large(ptr, size, flags); 4077 4078 return ptr; 4079 } 4080 4081 void *kmalloc_large(size_t size, gfp_t flags) 4082 { 4083 void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE); 4084 4085 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4086 flags, NUMA_NO_NODE); 4087 return ret; 4088 } 4089 EXPORT_SYMBOL(kmalloc_large); 4090 4091 void *kmalloc_large_node(size_t size, gfp_t flags, int node) 4092 { 4093 void *ret = __kmalloc_large_node(size, flags, node); 4094 4095 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4096 flags, node); 4097 return ret; 4098 } 4099 EXPORT_SYMBOL(kmalloc_large_node); 4100 4101 static __always_inline 4102 void *__do_kmalloc_node(size_t size, gfp_t flags, int node, 4103 unsigned long caller) 4104 { 4105 struct kmem_cache *s; 4106 void *ret; 4107 4108 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4109 ret = __kmalloc_large_node(size, flags, node); 4110 trace_kmalloc(caller, ret, size, 4111 PAGE_SIZE << get_order(size), flags, node); 4112 return ret; 4113 } 4114 4115 if (unlikely(!size)) 4116 return ZERO_SIZE_PTR; 4117 4118 s = kmalloc_slab(size, flags, caller); 4119 4120 ret = slab_alloc_node(s, NULL, flags, node, caller, size); 4121 ret = kasan_kmalloc(s, ret, size, flags); 4122 trace_kmalloc(caller, ret, size, s->size, flags, node); 4123 return ret; 4124 } 4125 4126 void *__kmalloc_node(size_t size, gfp_t flags, int node) 4127 { 4128 return __do_kmalloc_node(size, flags, node, _RET_IP_); 4129 } 4130 EXPORT_SYMBOL(__kmalloc_node); 4131 4132 void *__kmalloc(size_t size, gfp_t flags) 4133 { 4134 return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_); 4135 } 4136 EXPORT_SYMBOL(__kmalloc); 4137 4138 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 4139 int node, unsigned long caller) 4140 { 4141 return __do_kmalloc_node(size, flags, node, caller); 4142 } 4143 EXPORT_SYMBOL(__kmalloc_node_track_caller); 4144 4145 void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 4146 { 4147 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, 4148 _RET_IP_, size); 4149 4150 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); 4151 4152 ret = kasan_kmalloc(s, ret, size, gfpflags); 4153 return ret; 4154 } 4155 EXPORT_SYMBOL(kmalloc_trace); 4156 4157 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, 4158 int node, size_t size) 4159 { 4160 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); 4161 4162 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); 4163 4164 ret = kasan_kmalloc(s, ret, size, gfpflags); 4165 return ret; 4166 } 4167 EXPORT_SYMBOL(kmalloc_node_trace); 4168 4169 static noinline void free_to_partial_list( 4170 struct kmem_cache *s, struct slab *slab, 4171 void *head, void *tail, int bulk_cnt, 4172 unsigned long addr) 4173 { 4174 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 4175 struct slab *slab_free = NULL; 4176 int cnt = bulk_cnt; 4177 unsigned long flags; 4178 depot_stack_handle_t handle = 0; 4179 4180 if (s->flags & SLAB_STORE_USER) 4181 handle = set_track_prepare(); 4182 4183 spin_lock_irqsave(&n->list_lock, flags); 4184 4185 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { 4186 void *prior = slab->freelist; 4187 4188 /* Perform the actual freeing while we still hold the locks */ 4189 slab->inuse -= cnt; 4190 set_freepointer(s, tail, prior); 4191 slab->freelist = head; 4192 4193 /* 4194 * If the slab is empty, and node's partial list is 
full, 4195 * it should be discarded anyway no matter it's on full or 4196 * partial list. 4197 */ 4198 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) 4199 slab_free = slab; 4200 4201 if (!prior) { 4202 /* was on full list */ 4203 remove_full(s, n, slab); 4204 if (!slab_free) { 4205 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4206 stat(s, FREE_ADD_PARTIAL); 4207 } 4208 } else if (slab_free) { 4209 remove_partial(n, slab); 4210 stat(s, FREE_REMOVE_PARTIAL); 4211 } 4212 } 4213 4214 if (slab_free) { 4215 /* 4216 * Update the counters while still holding n->list_lock to 4217 * prevent spurious validation warnings 4218 */ 4219 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); 4220 } 4221 4222 spin_unlock_irqrestore(&n->list_lock, flags); 4223 4224 if (slab_free) { 4225 stat(s, FREE_SLAB); 4226 free_slab(s, slab_free); 4227 } 4228 } 4229 4230 /* 4231 * Slow path handling. This may still be called frequently since objects 4232 * have a longer lifetime than the cpu slabs in most processing loads. 4233 * 4234 * So we still attempt to reduce cache line usage. Just take the slab 4235 * lock and free the item. If there is no additional partial slab 4236 * handling required then we can return immediately. 4237 */ 4238 static void __slab_free(struct kmem_cache *s, struct slab *slab, 4239 void *head, void *tail, int cnt, 4240 unsigned long addr) 4241 4242 { 4243 void *prior; 4244 int was_frozen; 4245 struct slab new; 4246 unsigned long counters; 4247 struct kmem_cache_node *n = NULL; 4248 unsigned long flags; 4249 bool on_node_partial; 4250 4251 stat(s, FREE_SLOWPATH); 4252 4253 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 4254 free_to_partial_list(s, slab, head, tail, cnt, addr); 4255 return; 4256 } 4257 4258 do { 4259 if (unlikely(n)) { 4260 spin_unlock_irqrestore(&n->list_lock, flags); 4261 n = NULL; 4262 } 4263 prior = slab->freelist; 4264 counters = slab->counters; 4265 set_freepointer(s, tail, prior); 4266 new.counters = counters; 4267 was_frozen = new.frozen; 4268 new.inuse -= cnt; 4269 if ((!new.inuse || !prior) && !was_frozen) { 4270 /* Needs to be taken off a list */ 4271 if (!kmem_cache_has_cpu_partial(s) || prior) { 4272 4273 n = get_node(s, slab_nid(slab)); 4274 /* 4275 * Speculatively acquire the list_lock. 4276 * If the cmpxchg does not succeed then we may 4277 * drop the list_lock without any processing. 4278 * 4279 * Otherwise the list_lock will synchronize with 4280 * other processors updating the list of slabs. 4281 */ 4282 spin_lock_irqsave(&n->list_lock, flags); 4283 4284 on_node_partial = slab_test_node_partial(slab); 4285 } 4286 } 4287 4288 } while (!slab_update_freelist(s, slab, 4289 prior, counters, 4290 head, new.counters, 4291 "__slab_free")); 4292 4293 if (likely(!n)) { 4294 4295 if (likely(was_frozen)) { 4296 /* 4297 * The list lock was not taken therefore no list 4298 * activity can be necessary. 4299 */ 4300 stat(s, FREE_FROZEN); 4301 } else if (kmem_cache_has_cpu_partial(s) && !prior) { 4302 /* 4303 * If we started with a full slab then put it onto the 4304 * per cpu partial list. 4305 */ 4306 put_cpu_partial(s, slab, 1); 4307 stat(s, CPU_PARTIAL_FREE); 4308 } 4309 4310 return; 4311 } 4312 4313 /* 4314 * This slab was partially empty but not on the per-node partial list, 4315 * in which case we shouldn't manipulate its list, just return. 
4316 */ 4317 if (prior && !on_node_partial) { 4318 spin_unlock_irqrestore(&n->list_lock, flags); 4319 return; 4320 } 4321 4322 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 4323 goto slab_empty; 4324 4325 /* 4326 * Objects left in the slab. If it was not on the partial list before 4327 * then add it. 4328 */ 4329 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 4330 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4331 stat(s, FREE_ADD_PARTIAL); 4332 } 4333 spin_unlock_irqrestore(&n->list_lock, flags); 4334 return; 4335 4336 slab_empty: 4337 if (prior) { 4338 /* 4339 * Slab on the partial list. 4340 */ 4341 remove_partial(n, slab); 4342 stat(s, FREE_REMOVE_PARTIAL); 4343 } 4344 4345 spin_unlock_irqrestore(&n->list_lock, flags); 4346 stat(s, FREE_SLAB); 4347 discard_slab(s, slab); 4348 } 4349 4350 #ifndef CONFIG_SLUB_TINY 4351 /* 4352 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 4353 * can perform fastpath freeing without additional function calls. 4354 * 4355 * The fastpath is only possible if we are freeing to the current cpu slab 4356 * of this processor. This is typically the case if we have just allocated 4357 * the item before. 4358 * 4359 * If fastpath is not possible then fall back to __slab_free where we deal 4360 * with all sorts of special processing. 4361 * 4362 * Bulk free of a freelist with several objects (all pointing to the 4363 * same slab) is possible by specifying head and tail ptr, plus objects 4364 * count (cnt). Bulk free is indicated by the tail pointer being set. 4365 */ 4366 static __always_inline void do_slab_free(struct kmem_cache *s, 4367 struct slab *slab, void *head, void *tail, 4368 int cnt, unsigned long addr) 4369 { 4370 struct kmem_cache_cpu *c; 4371 unsigned long tid; 4372 void **freelist; 4373 4374 redo: 4375 /* 4376 * Determine the current cpu's per cpu slab. 4377 * The cpu may change afterward. However that does not matter since 4378 * data is retrieved via this pointer. If we are on the same cpu 4379 * during the cmpxchg then the free will succeed.
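 *
 * This mirrors the allocation fastpath above: snapshot c and tid, then
 * let the final update of the (freelist, tid) pair verify that we are
 * still on the same cpu and that no other operation slipped in between.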
4380 */ 4381 c = raw_cpu_ptr(s->cpu_slab); 4382 tid = READ_ONCE(c->tid); 4383 4384 /* Same with comment on barrier() in slab_alloc_node() */ 4385 barrier(); 4386 4387 if (unlikely(slab != c->slab)) { 4388 __slab_free(s, slab, head, tail, cnt, addr); 4389 return; 4390 } 4391 4392 if (USE_LOCKLESS_FAST_PATH()) { 4393 freelist = READ_ONCE(c->freelist); 4394 4395 set_freepointer(s, tail, freelist); 4396 4397 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { 4398 note_cmpxchg_failure("slab_free", s, tid); 4399 goto redo; 4400 } 4401 } else { 4402 /* Update the free list under the local lock */ 4403 local_lock(&s->cpu_slab->lock); 4404 c = this_cpu_ptr(s->cpu_slab); 4405 if (unlikely(slab != c->slab)) { 4406 local_unlock(&s->cpu_slab->lock); 4407 goto redo; 4408 } 4409 tid = c->tid; 4410 freelist = c->freelist; 4411 4412 set_freepointer(s, tail, freelist); 4413 c->freelist = head; 4414 c->tid = next_tid(tid); 4415 4416 local_unlock(&s->cpu_slab->lock); 4417 } 4418 stat_add(s, FREE_FASTPATH, cnt); 4419 } 4420 #else /* CONFIG_SLUB_TINY */ 4421 static void do_slab_free(struct kmem_cache *s, 4422 struct slab *slab, void *head, void *tail, 4423 int cnt, unsigned long addr) 4424 { 4425 __slab_free(s, slab, head, tail, cnt, addr); 4426 } 4427 #endif /* CONFIG_SLUB_TINY */ 4428 4429 static __fastpath_inline 4430 void slab_free(struct kmem_cache *s, struct slab *slab, void *object, 4431 unsigned long addr) 4432 { 4433 memcg_slab_free_hook(s, slab, &object, 1); 4434 alloc_tagging_slab_free_hook(s, slab, &object, 1); 4435 4436 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s)))) 4437 do_slab_free(s, slab, object, object, 1, addr); 4438 } 4439 4440 static __fastpath_inline 4441 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, 4442 void *tail, void **p, int cnt, unsigned long addr) 4443 { 4444 memcg_slab_free_hook(s, slab, p, cnt); 4445 alloc_tagging_slab_free_hook(s, slab, p, cnt); 4446 /* 4447 * With KASAN enabled slab_free_freelist_hook modifies the freelist 4448 * to remove objects, whose reuse must be delayed. 4449 */ 4450 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) 4451 do_slab_free(s, slab, head, tail, cnt, addr); 4452 } 4453 4454 #ifdef CONFIG_KASAN_GENERIC 4455 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 4456 { 4457 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr); 4458 } 4459 #endif 4460 4461 static inline struct kmem_cache *virt_to_cache(const void *obj) 4462 { 4463 struct slab *slab; 4464 4465 slab = virt_to_slab(obj); 4466 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) 4467 return NULL; 4468 return slab->slab_cache; 4469 } 4470 4471 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) 4472 { 4473 struct kmem_cache *cachep; 4474 4475 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && 4476 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) 4477 return s; 4478 4479 cachep = virt_to_cache(x); 4480 if (WARN(cachep && cachep != s, 4481 "%s: Wrong slab cache. %s but object is from %s\n", 4482 __func__, s->name, cachep->name)) 4483 print_tracking(cachep, x); 4484 return cachep; 4485 } 4486 4487 /** 4488 * kmem_cache_free - Deallocate an object 4489 * @s: The cache the allocation was from. 4490 * @x: The previously allocated object. 4491 * 4492 * Free an object which was previously allocated from this 4493 * cache. 
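 *
 * A minimal usage sketch (foo_cachep and struct foo are hypothetical):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *
 *	if (f) {
 *		... use the object ...
 *		kmem_cache_free(foo_cachep, f);
 *	}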
4494 */ 4495 void kmem_cache_free(struct kmem_cache *s, void *x) 4496 { 4497 s = cache_from_obj(s, x); 4498 if (!s) 4499 return; 4500 trace_kmem_cache_free(_RET_IP_, x, s); 4501 slab_free(s, virt_to_slab(x), x, _RET_IP_); 4502 } 4503 EXPORT_SYMBOL(kmem_cache_free); 4504 4505 static void free_large_kmalloc(struct folio *folio, void *object) 4506 { 4507 unsigned int order = folio_order(folio); 4508 4509 if (WARN_ON_ONCE(order == 0)) 4510 pr_warn_once("object pointer: 0x%p\n", object); 4511 4512 kmemleak_free(object); 4513 kasan_kfree_large(object); 4514 kmsan_kfree_large(object); 4515 4516 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4517 -(PAGE_SIZE << order)); 4518 folio_put(folio); 4519 } 4520 4521 /** 4522 * kfree - free previously allocated memory 4523 * @object: pointer returned by kmalloc() or kmem_cache_alloc() 4524 * 4525 * If @object is NULL, no operation is performed. 4526 */ 4527 void kfree(const void *object) 4528 { 4529 struct folio *folio; 4530 struct slab *slab; 4531 struct kmem_cache *s; 4532 void *x = (void *)object; 4533 4534 trace_kfree(_RET_IP_, object); 4535 4536 if (unlikely(ZERO_OR_NULL_PTR(object))) 4537 return; 4538 4539 folio = virt_to_folio(object); 4540 if (unlikely(!folio_test_slab(folio))) { 4541 free_large_kmalloc(folio, (void *)object); 4542 return; 4543 } 4544 4545 slab = folio_slab(folio); 4546 s = slab->slab_cache; 4547 slab_free(s, slab, x, _RET_IP_); 4548 } 4549 EXPORT_SYMBOL(kfree); 4550 4551 struct detached_freelist { 4552 struct slab *slab; 4553 void *tail; 4554 void *freelist; 4555 int cnt; 4556 struct kmem_cache *s; 4557 }; 4558 4559 /* 4560 * This function progressively scans the array with free objects (with 4561 * a limited look ahead) and extract objects belonging to the same 4562 * slab. It builds a detached freelist directly within the given 4563 * slab/objects. This can happen without any need for 4564 * synchronization, because the objects are owned by running process. 4565 * The freelist is build up as a single linked list in the objects. 4566 * The idea is, that this detached freelist can then be bulk 4567 * transferred to the real freelist(s), but only requiring a single 4568 * synchronization primitive. Look ahead in the array is limited due 4569 * to performance reasons. 
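 *
 * For example (hypothetical objects): freeing p[] = {A1, B1, A2}, where
 * A1/A2 live in slab A and B1 in slab B, starts from A2, links
 * A1 -> A2 into a detached freelist with df->cnt == 2, swaps B1 to the
 * front of the still-pending range and returns 1, so the caller's next
 * pass frees B1 with a separate detached freelist of its own.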
4570 */ 4571 static inline 4572 int build_detached_freelist(struct kmem_cache *s, size_t size, 4573 void **p, struct detached_freelist *df) 4574 { 4575 int lookahead = 3; 4576 void *object; 4577 struct folio *folio; 4578 size_t same; 4579 4580 object = p[--size]; 4581 folio = virt_to_folio(object); 4582 if (!s) { 4583 /* Handle kalloc'ed objects */ 4584 if (unlikely(!folio_test_slab(folio))) { 4585 free_large_kmalloc(folio, object); 4586 df->slab = NULL; 4587 return size; 4588 } 4589 /* Derive kmem_cache from object */ 4590 df->slab = folio_slab(folio); 4591 df->s = df->slab->slab_cache; 4592 } else { 4593 df->slab = folio_slab(folio); 4594 df->s = cache_from_obj(s, object); /* Support for memcg */ 4595 } 4596 4597 /* Start new detached freelist */ 4598 df->tail = object; 4599 df->freelist = object; 4600 df->cnt = 1; 4601 4602 if (is_kfence_address(object)) 4603 return size; 4604 4605 set_freepointer(df->s, object, NULL); 4606 4607 same = size; 4608 while (size) { 4609 object = p[--size]; 4610 /* df->slab is always set at this point */ 4611 if (df->slab == virt_to_slab(object)) { 4612 /* Opportunity build freelist */ 4613 set_freepointer(df->s, object, df->freelist); 4614 df->freelist = object; 4615 df->cnt++; 4616 same--; 4617 if (size != same) 4618 swap(p[size], p[same]); 4619 continue; 4620 } 4621 4622 /* Limit look ahead search */ 4623 if (!--lookahead) 4624 break; 4625 } 4626 4627 return same; 4628 } 4629 4630 /* 4631 * Internal bulk free of objects that were not initialised by the post alloc 4632 * hooks and thus should not be processed by the free hooks 4633 */ 4634 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4635 { 4636 if (!size) 4637 return; 4638 4639 do { 4640 struct detached_freelist df; 4641 4642 size = build_detached_freelist(s, size, p, &df); 4643 if (!df.slab) 4644 continue; 4645 4646 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, 4647 _RET_IP_); 4648 } while (likely(size)); 4649 } 4650 4651 /* Note that interrupts must be enabled when calling this function. */ 4652 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4653 { 4654 if (!size) 4655 return; 4656 4657 do { 4658 struct detached_freelist df; 4659 4660 size = build_detached_freelist(s, size, p, &df); 4661 if (!df.slab) 4662 continue; 4663 4664 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size], 4665 df.cnt, _RET_IP_); 4666 } while (likely(size)); 4667 } 4668 EXPORT_SYMBOL(kmem_cache_free_bulk); 4669 4670 #ifndef CONFIG_SLUB_TINY 4671 static inline 4672 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 4673 void **p) 4674 { 4675 struct kmem_cache_cpu *c; 4676 unsigned long irqflags; 4677 int i; 4678 4679 /* 4680 * Drain objects in the per cpu slab, while disabling local 4681 * IRQs, which protects against PREEMPT and interrupts 4682 * handlers invoking normal fastpath. 4683 */ 4684 c = slub_get_cpu_ptr(s->cpu_slab); 4685 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4686 4687 for (i = 0; i < size; i++) { 4688 void *object = kfence_alloc(s, s->object_size, flags); 4689 4690 if (unlikely(object)) { 4691 p[i] = object; 4692 continue; 4693 } 4694 4695 object = c->freelist; 4696 if (unlikely(!object)) { 4697 /* 4698 * We may have removed an object from c->freelist using 4699 * the fastpath in the previous iteration; in that case, 4700 * c->tid has not been bumped yet. 4701 * Since ___slab_alloc() may reenable interrupts while 4702 * allocating memory, we should bump c->tid now. 
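 *
 * (The tid is bumped once more right before the final unlock after
 * the loop for the same reason: objects taken through the locked
 * fastpath in this loop do not advance the tid one by one.)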
4703 */ 4704 c->tid = next_tid(c->tid); 4705 4706 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4707 4708 /* 4709 * Invoking slow path likely have side-effect 4710 * of re-populating per CPU c->freelist 4711 */ 4712 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 4713 _RET_IP_, c, s->object_size); 4714 if (unlikely(!p[i])) 4715 goto error; 4716 4717 c = this_cpu_ptr(s->cpu_slab); 4718 maybe_wipe_obj_freeptr(s, p[i]); 4719 4720 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4721 4722 continue; /* goto for-loop */ 4723 } 4724 c->freelist = get_freepointer(s, object); 4725 p[i] = object; 4726 maybe_wipe_obj_freeptr(s, p[i]); 4727 stat(s, ALLOC_FASTPATH); 4728 } 4729 c->tid = next_tid(c->tid); 4730 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4731 slub_put_cpu_ptr(s->cpu_slab); 4732 4733 return i; 4734 4735 error: 4736 slub_put_cpu_ptr(s->cpu_slab); 4737 __kmem_cache_free_bulk(s, i, p); 4738 return 0; 4739 4740 } 4741 #else /* CONFIG_SLUB_TINY */ 4742 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 4743 size_t size, void **p) 4744 { 4745 int i; 4746 4747 for (i = 0; i < size; i++) { 4748 void *object = kfence_alloc(s, s->object_size, flags); 4749 4750 if (unlikely(object)) { 4751 p[i] = object; 4752 continue; 4753 } 4754 4755 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, 4756 _RET_IP_, s->object_size); 4757 if (unlikely(!p[i])) 4758 goto error; 4759 4760 maybe_wipe_obj_freeptr(s, p[i]); 4761 } 4762 4763 return i; 4764 4765 error: 4766 __kmem_cache_free_bulk(s, i, p); 4767 return 0; 4768 } 4769 #endif /* CONFIG_SLUB_TINY */ 4770 4771 /* Note that interrupts must be enabled when calling this function. */ 4772 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 4773 void **p) 4774 { 4775 int i; 4776 struct obj_cgroup *objcg = NULL; 4777 4778 if (!size) 4779 return 0; 4780 4781 /* memcg and kmem_cache debug support */ 4782 s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags); 4783 if (unlikely(!s)) 4784 return 0; 4785 4786 i = __kmem_cache_alloc_bulk(s, flags, size, p); 4787 4788 /* 4789 * memcg and kmem_cache debug support and memory initialization. 4790 * Done outside of the IRQ disabled fastpath loop. 4791 */ 4792 if (likely(i != 0)) { 4793 slab_post_alloc_hook(s, objcg, flags, size, p, 4794 slab_want_init_on_alloc(flags, s), s->object_size); 4795 } else { 4796 memcg_slab_alloc_error_hook(s, size, objcg); 4797 } 4798 4799 return i; 4800 } 4801 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 4802 4803 4804 /* 4805 * Object placement in a slab is made very easy because we always start at 4806 * offset 0. If we tune the size of the object to the alignment then we can 4807 * get the required alignment by putting one properly sized object after 4808 * another. 4809 * 4810 * Notice that the allocation order determines the sizes of the per cpu 4811 * caches. Each processor has always one slab available for allocations. 4812 * Increasing the allocation order reduces the number of times that slabs 4813 * must be moved on and off the partial lists and is therefore a factor in 4814 * locking overhead. 4815 */ 4816 4817 /* 4818 * Minimum / Maximum order of slab pages. This influences locking overhead 4819 * and slab fragmentation. A higher order reduces the number of partial slabs 4820 * and increases the number of allocations possible without having to 4821 * take the list_lock. 4822 */ 4823 static unsigned int slub_min_order; 4824 static unsigned int slub_max_order = 4825 IS_ENABLED(CONFIG_SLUB_TINY) ? 
1 : PAGE_ALLOC_COSTLY_ORDER; 4826 static unsigned int slub_min_objects; 4827 4828 /* 4829 * Calculate the order of allocation given an slab object size. 4830 * 4831 * The order of allocation has significant impact on performance and other 4832 * system components. Generally order 0 allocations should be preferred since 4833 * order 0 does not cause fragmentation in the page allocator. Larger objects 4834 * be problematic to put into order 0 slabs because there may be too much 4835 * unused space left. We go to a higher order if more than 1/16th of the slab 4836 * would be wasted. 4837 * 4838 * In order to reach satisfactory performance we must ensure that a minimum 4839 * number of objects is in one slab. Otherwise we may generate too much 4840 * activity on the partial lists which requires taking the list_lock. This is 4841 * less a concern for large slabs though which are rarely used. 4842 * 4843 * slab_max_order specifies the order where we begin to stop considering the 4844 * number of objects in a slab as critical. If we reach slab_max_order then 4845 * we try to keep the page order as low as possible. So we accept more waste 4846 * of space in favor of a small page order. 4847 * 4848 * Higher order allocations also allow the placement of more objects in a 4849 * slab and thereby reduce object handling overhead. If the user has 4850 * requested a higher minimum order then we start with that one instead of 4851 * the smallest order which will fit the object. 4852 */ 4853 static inline unsigned int calc_slab_order(unsigned int size, 4854 unsigned int min_order, unsigned int max_order, 4855 unsigned int fract_leftover) 4856 { 4857 unsigned int order; 4858 4859 for (order = min_order; order <= max_order; order++) { 4860 4861 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 4862 unsigned int rem; 4863 4864 rem = slab_size % size; 4865 4866 if (rem <= slab_size / fract_leftover) 4867 break; 4868 } 4869 4870 return order; 4871 } 4872 4873 static inline int calculate_order(unsigned int size) 4874 { 4875 unsigned int order; 4876 unsigned int min_objects; 4877 unsigned int max_objects; 4878 unsigned int min_order; 4879 4880 min_objects = slub_min_objects; 4881 if (!min_objects) { 4882 /* 4883 * Some architectures will only update present cpus when 4884 * onlining them, so don't trust the number if it's just 1. But 4885 * we also don't want to use nr_cpu_ids always, as on some other 4886 * architectures, there can be many possible cpus, but never 4887 * onlined. Here we compromise between trying to avoid too high 4888 * order on systems that appear larger than they are, and too 4889 * low order on systems that appear smaller than they are. 4890 */ 4891 unsigned int nr_cpus = num_present_cpus(); 4892 if (nr_cpus <= 1) 4893 nr_cpus = nr_cpu_ids; 4894 min_objects = 4 * (fls(nr_cpus) + 1); 4895 } 4896 /* min_objects can't be 0 because get_order(0) is undefined */ 4897 max_objects = max(order_objects(slub_max_order, size), 1U); 4898 min_objects = min(min_objects, max_objects); 4899 4900 min_order = max_t(unsigned int, slub_min_order, 4901 get_order(min_objects * size)); 4902 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 4903 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 4904 4905 /* 4906 * Attempt to find best configuration for a slab. This works by first 4907 * attempting to generate a layout with the best possible configuration 4908 * and backing off gradually. 
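 *
 * A worked example of the 1/16 criterion described below, with
 * illustrative numbers and slab_min_order 0: on 4 KiB pages a 700 byte
 * object leaves 4096 % 700 = 596 bytes unused at order 0, more than
 * 1/16 of the slab (256 bytes), so order 1 is tried, where
 * 8192 % 700 = 492 bytes of waste stays within the 1/16 limit
 * (512 bytes) and that order is accepted.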
4909 * 4910 * We start with accepting at most 1/16 waste and try to find the 4911 * smallest order from min_objects-derived/slab_min_order up to 4912 * slab_max_order that will satisfy the constraint. Note that increasing 4913 * the order can only result in same or less fractional waste, not more. 4914 * 4915 * If that fails, we increase the acceptable fraction of waste and try 4916 * again. The last iteration with fraction of 1/2 would effectively 4917 * accept any waste and give us the order determined by min_objects, as 4918 * long as at least single object fits within slab_max_order. 4919 */ 4920 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) { 4921 order = calc_slab_order(size, min_order, slub_max_order, 4922 fraction); 4923 if (order <= slub_max_order) 4924 return order; 4925 } 4926 4927 /* 4928 * Doh this slab cannot be placed using slab_max_order. 4929 */ 4930 order = get_order(size); 4931 if (order <= MAX_PAGE_ORDER) 4932 return order; 4933 return -ENOSYS; 4934 } 4935 4936 static void 4937 init_kmem_cache_node(struct kmem_cache_node *n) 4938 { 4939 n->nr_partial = 0; 4940 spin_lock_init(&n->list_lock); 4941 INIT_LIST_HEAD(&n->partial); 4942 #ifdef CONFIG_SLUB_DEBUG 4943 atomic_long_set(&n->nr_slabs, 0); 4944 atomic_long_set(&n->total_objects, 0); 4945 INIT_LIST_HEAD(&n->full); 4946 #endif 4947 } 4948 4949 #ifndef CONFIG_SLUB_TINY 4950 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 4951 { 4952 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 4953 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * 4954 sizeof(struct kmem_cache_cpu)); 4955 4956 /* 4957 * Must align to double word boundary for the double cmpxchg 4958 * instructions to work; see __pcpu_double_call_return_bool(). 4959 */ 4960 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 4961 2 * sizeof(void *)); 4962 4963 if (!s->cpu_slab) 4964 return 0; 4965 4966 init_kmem_cache_cpus(s); 4967 4968 return 1; 4969 } 4970 #else 4971 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 4972 { 4973 return 1; 4974 } 4975 #endif /* CONFIG_SLUB_TINY */ 4976 4977 static struct kmem_cache *kmem_cache_node; 4978 4979 /* 4980 * No kmalloc_node yet so do it by hand. We know that this is the first 4981 * slab on the node for this slabcache. There are no concurrent accesses 4982 * possible. 4983 * 4984 * Note that this function only works on the kmem_cache_node 4985 * when allocating for the kmem_cache_node. This is used for bootstrapping 4986 * memory on a fresh node that has no slab structures yet. 
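 *
 * (kmem_cache_node is the cache backing struct kmem_cache_node itself,
 * so its very first per-node structure must be carved out of a freshly
 * allocated slab by hand rather than via kmem_cache_alloc_node(), which
 * is what init_kmem_cache_nodes() uses once slab_state has progressed
 * past DOWN.)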
4987 */ 4988 static void early_kmem_cache_node_alloc(int node) 4989 { 4990 struct slab *slab; 4991 struct kmem_cache_node *n; 4992 4993 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 4994 4995 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 4996 4997 BUG_ON(!slab); 4998 if (slab_nid(slab) != node) { 4999 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 5000 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 5001 } 5002 5003 n = slab->freelist; 5004 BUG_ON(!n); 5005 #ifdef CONFIG_SLUB_DEBUG 5006 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 5007 init_tracking(kmem_cache_node, n); 5008 #endif 5009 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 5010 slab->freelist = get_freepointer(kmem_cache_node, n); 5011 slab->inuse = 1; 5012 kmem_cache_node->node[node] = n; 5013 init_kmem_cache_node(n); 5014 inc_slabs_node(kmem_cache_node, node, slab->objects); 5015 5016 /* 5017 * No locks need to be taken here as it has just been 5018 * initialized and there is no concurrent access. 5019 */ 5020 __add_partial(n, slab, DEACTIVATE_TO_HEAD); 5021 } 5022 5023 static void free_kmem_cache_nodes(struct kmem_cache *s) 5024 { 5025 int node; 5026 struct kmem_cache_node *n; 5027 5028 for_each_kmem_cache_node(s, node, n) { 5029 s->node[node] = NULL; 5030 kmem_cache_free(kmem_cache_node, n); 5031 } 5032 } 5033 5034 void __kmem_cache_release(struct kmem_cache *s) 5035 { 5036 cache_random_seq_destroy(s); 5037 #ifndef CONFIG_SLUB_TINY 5038 free_percpu(s->cpu_slab); 5039 #endif 5040 free_kmem_cache_nodes(s); 5041 } 5042 5043 static int init_kmem_cache_nodes(struct kmem_cache *s) 5044 { 5045 int node; 5046 5047 for_each_node_mask(node, slab_nodes) { 5048 struct kmem_cache_node *n; 5049 5050 if (slab_state == DOWN) { 5051 early_kmem_cache_node_alloc(node); 5052 continue; 5053 } 5054 n = kmem_cache_alloc_node(kmem_cache_node, 5055 GFP_KERNEL, node); 5056 5057 if (!n) { 5058 free_kmem_cache_nodes(s); 5059 return 0; 5060 } 5061 5062 init_kmem_cache_node(n); 5063 s->node[node] = n; 5064 } 5065 return 1; 5066 } 5067 5068 static void set_cpu_partial(struct kmem_cache *s) 5069 { 5070 #ifdef CONFIG_SLUB_CPU_PARTIAL 5071 unsigned int nr_objects; 5072 5073 /* 5074 * cpu_partial determined the maximum number of objects kept in the 5075 * per cpu partial lists of a processor. 5076 * 5077 * Per cpu partial lists mainly contain slabs that just have one 5078 * object freed. If they are used for allocation then they can be 5079 * filled up again with minimal effort. The slab will never hit the 5080 * per node partial lists and therefore no locking will be required. 5081 * 5082 * For backwards compatibility reasons, this is determined as number 5083 * of objects, even though we now limit maximum number of pages, see 5084 * slub_set_cpu_partial() 5085 */ 5086 if (!kmem_cache_has_cpu_partial(s)) 5087 nr_objects = 0; 5088 else if (s->size >= PAGE_SIZE) 5089 nr_objects = 6; 5090 else if (s->size >= 1024) 5091 nr_objects = 24; 5092 else if (s->size >= 256) 5093 nr_objects = 52; 5094 else 5095 nr_objects = 120; 5096 5097 slub_set_cpu_partial(s, nr_objects); 5098 #endif 5099 } 5100 5101 /* 5102 * calculate_sizes() determines the order and the distribution of data within 5103 * a slab object. 5104 */ 5105 static int calculate_sizes(struct kmem_cache *s) 5106 { 5107 slab_flags_t flags = s->flags; 5108 unsigned int size = s->object_size; 5109 unsigned int order; 5110 5111 /* 5112 * Round up object size to the next word boundary. 
We can only 5113 * place the free pointer at word boundaries and this determines 5114 * the possible location of the free pointer. 5115 */ 5116 size = ALIGN(size, sizeof(void *)); 5117 5118 #ifdef CONFIG_SLUB_DEBUG 5119 /* 5120 * Determine if we can poison the object itself. If the user of 5121 * the slab may touch the object after free or before allocation 5122 * then we should never poison the object itself. 5123 */ 5124 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 5125 !s->ctor) 5126 s->flags |= __OBJECT_POISON; 5127 else 5128 s->flags &= ~__OBJECT_POISON; 5129 5130 5131 /* 5132 * If we are Redzoning then check if there is some space between the 5133 * end of the object and the free pointer. If not then add an 5134 * additional word to have some bytes to store Redzone information. 5135 */ 5136 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 5137 size += sizeof(void *); 5138 #endif 5139 5140 /* 5141 * With that we have determined the number of bytes in actual use 5142 * by the object and redzoning. 5143 */ 5144 s->inuse = size; 5145 5146 if (slub_debug_orig_size(s) || 5147 (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 5148 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || 5149 s->ctor) { 5150 /* 5151 * Relocate free pointer after the object if it is not 5152 * permitted to overwrite the first word of the object on 5153 * kmem_cache_free. 5154 * 5155 * This is the case if we do RCU, have a constructor or 5156 * destructor, are poisoning the objects, or are 5157 * redzoning an object smaller than sizeof(void *). 5158 * 5159 * The assumption that s->offset >= s->inuse means free 5160 * pointer is outside of the object is used in the 5161 * freeptr_outside_object() function. If that is no 5162 * longer true, the function needs to be modified. 5163 */ 5164 s->offset = size; 5165 size += sizeof(void *); 5166 } else { 5167 /* 5168 * Store freelist pointer near middle of object to keep 5169 * it away from the edges of the object to avoid small 5170 * sized over/underflows from neighboring allocations. 5171 */ 5172 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 5173 } 5174 5175 #ifdef CONFIG_SLUB_DEBUG 5176 if (flags & SLAB_STORE_USER) { 5177 /* 5178 * Need to store information about allocs and frees after 5179 * the object. 5180 */ 5181 size += 2 * sizeof(struct track); 5182 5183 /* Save the original kmalloc request size */ 5184 if (flags & SLAB_KMALLOC) 5185 size += sizeof(unsigned int); 5186 } 5187 #endif 5188 5189 kasan_cache_create(s, &size, &s->flags); 5190 #ifdef CONFIG_SLUB_DEBUG 5191 if (flags & SLAB_RED_ZONE) { 5192 /* 5193 * Add some empty padding so that we can catch 5194 * overwrites from earlier objects rather than let 5195 * tracking information or the free pointer be 5196 * corrupted if a user writes before the start 5197 * of the object. 5198 */ 5199 size += sizeof(void *); 5200 5201 s->red_left_pad = sizeof(void *); 5202 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 5203 size += s->red_left_pad; 5204 } 5205 #endif 5206 5207 /* 5208 * SLUB stores one object immediately after another beginning from 5209 * offset 0. In order to align the objects we have to simply size 5210 * each object to conform to the alignment. 
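 *
 * An illustrative walk-through, assuming a 64-bit kernel, s->align == 8,
 * no constructor, no debug flags and no KASAN metadata: object_size 52
 * is first rounded up to the word boundary 56, the free pointer is kept
 * inside the object at ALIGN_DOWN(52 / 2, 8) == 24, and the final
 * ALIGN(56, 8) leaves s->size == 56. Debug options such as red zones or
 * track structs grow the size further as described above.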
5211 */ 5212 size = ALIGN(size, s->align); 5213 s->size = size; 5214 s->reciprocal_size = reciprocal_value(size); 5215 order = calculate_order(size); 5216 5217 if ((int)order < 0) 5218 return 0; 5219 5220 s->allocflags = 0; 5221 if (order) 5222 s->allocflags |= __GFP_COMP; 5223 5224 if (s->flags & SLAB_CACHE_DMA) 5225 s->allocflags |= GFP_DMA; 5226 5227 if (s->flags & SLAB_CACHE_DMA32) 5228 s->allocflags |= GFP_DMA32; 5229 5230 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5231 s->allocflags |= __GFP_RECLAIMABLE; 5232 5233 /* 5234 * Determine the number of objects per slab 5235 */ 5236 s->oo = oo_make(order, size); 5237 s->min = oo_make(get_order(size), size); 5238 5239 return !!oo_objects(s->oo); 5240 } 5241 5242 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 5243 { 5244 s->flags = kmem_cache_flags(flags, s->name); 5245 #ifdef CONFIG_SLAB_FREELIST_HARDENED 5246 s->random = get_random_long(); 5247 #endif 5248 5249 if (!calculate_sizes(s)) 5250 goto error; 5251 if (disable_higher_order_debug) { 5252 /* 5253 * Disable debugging flags that store metadata if the min slab 5254 * order increased. 5255 */ 5256 if (get_order(s->size) > get_order(s->object_size)) { 5257 s->flags &= ~DEBUG_METADATA_FLAGS; 5258 s->offset = 0; 5259 if (!calculate_sizes(s)) 5260 goto error; 5261 } 5262 } 5263 5264 #ifdef system_has_freelist_aba 5265 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { 5266 /* Enable fast mode */ 5267 s->flags |= __CMPXCHG_DOUBLE; 5268 } 5269 #endif 5270 5271 /* 5272 * The larger the object size is, the more slabs we want on the partial 5273 * list to avoid pounding the page allocator excessively. 5274 */ 5275 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 5276 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 5277 5278 set_cpu_partial(s); 5279 5280 #ifdef CONFIG_NUMA 5281 s->remote_node_defrag_ratio = 1000; 5282 #endif 5283 5284 /* Initialize the pre-computed randomized freelist if slab is up */ 5285 if (slab_state >= UP) { 5286 if (init_cache_random_seq(s)) 5287 goto error; 5288 } 5289 5290 if (!init_kmem_cache_nodes(s)) 5291 goto error; 5292 5293 if (alloc_kmem_cache_cpus(s)) 5294 return 0; 5295 5296 error: 5297 __kmem_cache_release(s); 5298 return -EINVAL; 5299 } 5300 5301 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, 5302 const char *text) 5303 { 5304 #ifdef CONFIG_SLUB_DEBUG 5305 void *addr = slab_address(slab); 5306 void *p; 5307 5308 slab_err(s, slab, text, s->name); 5309 5310 spin_lock(&object_map_lock); 5311 __fill_map(object_map, s, slab); 5312 5313 for_each_object(p, s, addr, slab->objects) { 5314 5315 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { 5316 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 5317 print_tracking(s, p); 5318 } 5319 } 5320 spin_unlock(&object_map_lock); 5321 #endif 5322 } 5323 5324 /* 5325 * Attempt to free all partial slabs on a node. 5326 * This is called from __kmem_cache_shutdown(). We must take list_lock 5327 * because sysfs file might still access partial list after the shutdowning. 
5328 */ 5329 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 5330 { 5331 LIST_HEAD(discard); 5332 struct slab *slab, *h; 5333 5334 BUG_ON(irqs_disabled()); 5335 spin_lock_irq(&n->list_lock); 5336 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 5337 if (!slab->inuse) { 5338 remove_partial(n, slab); 5339 list_add(&slab->slab_list, &discard); 5340 } else { 5341 list_slab_objects(s, slab, 5342 "Objects remaining in %s on __kmem_cache_shutdown()"); 5343 } 5344 } 5345 spin_unlock_irq(&n->list_lock); 5346 5347 list_for_each_entry_safe(slab, h, &discard, slab_list) 5348 discard_slab(s, slab); 5349 } 5350 5351 bool __kmem_cache_empty(struct kmem_cache *s) 5352 { 5353 int node; 5354 struct kmem_cache_node *n; 5355 5356 for_each_kmem_cache_node(s, node, n) 5357 if (n->nr_partial || node_nr_slabs(n)) 5358 return false; 5359 return true; 5360 } 5361 5362 /* 5363 * Release all resources used by a slab cache. 5364 */ 5365 int __kmem_cache_shutdown(struct kmem_cache *s) 5366 { 5367 int node; 5368 struct kmem_cache_node *n; 5369 5370 flush_all_cpus_locked(s); 5371 /* Attempt to free all objects */ 5372 for_each_kmem_cache_node(s, node, n) { 5373 free_partial(s, n); 5374 if (n->nr_partial || node_nr_slabs(n)) 5375 return 1; 5376 } 5377 return 0; 5378 } 5379 5380 #ifdef CONFIG_PRINTK 5381 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 5382 { 5383 void *base; 5384 int __maybe_unused i; 5385 unsigned int objnr; 5386 void *objp; 5387 void *objp0; 5388 struct kmem_cache *s = slab->slab_cache; 5389 struct track __maybe_unused *trackp; 5390 5391 kpp->kp_ptr = object; 5392 kpp->kp_slab = slab; 5393 kpp->kp_slab_cache = s; 5394 base = slab_address(slab); 5395 objp0 = kasan_reset_tag(object); 5396 #ifdef CONFIG_SLUB_DEBUG 5397 objp = restore_red_left(s, objp0); 5398 #else 5399 objp = objp0; 5400 #endif 5401 objnr = obj_to_index(s, slab, objp); 5402 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 5403 objp = base + s->size * objnr; 5404 kpp->kp_objp = objp; 5405 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 5406 || (objp - base) % s->size) || 5407 !(s->flags & SLAB_STORE_USER)) 5408 return; 5409 #ifdef CONFIG_SLUB_DEBUG 5410 objp = fixup_red_left(s, objp); 5411 trackp = get_track(s, objp, TRACK_ALLOC); 5412 kpp->kp_ret = (void *)trackp->addr; 5413 #ifdef CONFIG_STACKDEPOT 5414 { 5415 depot_stack_handle_t handle; 5416 unsigned long *entries; 5417 unsigned int nr_entries; 5418 5419 handle = READ_ONCE(trackp->handle); 5420 if (handle) { 5421 nr_entries = stack_depot_fetch(handle, &entries); 5422 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5423 kpp->kp_stack[i] = (void *)entries[i]; 5424 } 5425 5426 trackp = get_track(s, objp, TRACK_FREE); 5427 handle = READ_ONCE(trackp->handle); 5428 if (handle) { 5429 nr_entries = stack_depot_fetch(handle, &entries); 5430 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5431 kpp->kp_free_stack[i] = (void *)entries[i]; 5432 } 5433 } 5434 #endif 5435 #endif 5436 } 5437 #endif 5438 5439 /******************************************************************** 5440 * Kmalloc subsystem 5441 *******************************************************************/ 5442 5443 static int __init setup_slub_min_order(char *str) 5444 { 5445 get_option(&str, (int *)&slub_min_order); 5446 5447 if (slub_min_order > slub_max_order) 5448 slub_max_order = slub_min_order; 5449 5450 return 1; 5451 } 5452 5453 __setup("slab_min_order=", setup_slub_min_order); 5454 
__setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0); 5455 5456 5457 static int __init setup_slub_max_order(char *str) 5458 { 5459 get_option(&str, (int *)&slub_max_order); 5460 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER); 5461 5462 if (slub_min_order > slub_max_order) 5463 slub_min_order = slub_max_order; 5464 5465 return 1; 5466 } 5467 5468 __setup("slab_max_order=", setup_slub_max_order); 5469 __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0); 5470 5471 static int __init setup_slub_min_objects(char *str) 5472 { 5473 get_option(&str, (int *)&slub_min_objects); 5474 5475 return 1; 5476 } 5477 5478 __setup("slab_min_objects=", setup_slub_min_objects); 5479 __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0); 5480 5481 #ifdef CONFIG_HARDENED_USERCOPY 5482 /* 5483 * Rejects incorrectly sized objects and objects that are to be copied 5484 * to/from userspace but do not fall entirely within the containing slab 5485 * cache's usercopy region. 5486 * 5487 * Returns NULL if check passes, otherwise const char * to name of cache 5488 * to indicate an error. 5489 */ 5490 void __check_heap_object(const void *ptr, unsigned long n, 5491 const struct slab *slab, bool to_user) 5492 { 5493 struct kmem_cache *s; 5494 unsigned int offset; 5495 bool is_kfence = is_kfence_address(ptr); 5496 5497 ptr = kasan_reset_tag(ptr); 5498 5499 /* Find object and usable object size. */ 5500 s = slab->slab_cache; 5501 5502 /* Reject impossible pointers. */ 5503 if (ptr < slab_address(slab)) 5504 usercopy_abort("SLUB object not in SLUB page?!", NULL, 5505 to_user, 0, n); 5506 5507 /* Find offset within object. */ 5508 if (is_kfence) 5509 offset = ptr - kfence_object_start(ptr); 5510 else 5511 offset = (ptr - slab_address(slab)) % s->size; 5512 5513 /* Adjust for redzone and reject if within the redzone. */ 5514 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 5515 if (offset < s->red_left_pad) 5516 usercopy_abort("SLUB object in left red zone", 5517 s->name, to_user, offset, n); 5518 offset -= s->red_left_pad; 5519 } 5520 5521 /* Allow address range falling entirely within usercopy region. */ 5522 if (offset >= s->useroffset && 5523 offset - s->useroffset <= s->usersize && 5524 n <= s->useroffset - offset + s->usersize) 5525 return; 5526 5527 usercopy_abort("SLUB object", s->name, to_user, offset, n); 5528 } 5529 #endif /* CONFIG_HARDENED_USERCOPY */ 5530 5531 #define SHRINK_PROMOTE_MAX 32 5532 5533 /* 5534 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 5535 * up most to the head of the partial lists. New allocations will then 5536 * fill those up and thus they can be removed from the partial lists. 5537 * 5538 * The slabs with the least items are placed last. This results in them 5539 * being allocated from last increasing the chance that the last objects 5540 * are freed in them. 5541 */ 5542 static int __kmem_cache_do_shrink(struct kmem_cache *s) 5543 { 5544 int node; 5545 int i; 5546 struct kmem_cache_node *n; 5547 struct slab *slab; 5548 struct slab *t; 5549 struct list_head discard; 5550 struct list_head promote[SHRINK_PROMOTE_MAX]; 5551 unsigned long flags; 5552 int ret = 0; 5553 5554 for_each_kmem_cache_node(s, node, n) { 5555 INIT_LIST_HEAD(&discard); 5556 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 5557 INIT_LIST_HEAD(promote + i); 5558 5559 spin_lock_irqsave(&n->list_lock, flags); 5560 5561 /* 5562 * Build lists of slabs to discard or promote. 
5563 * 5564 * Note that concurrent frees may occur while we hold the 5565 * list_lock. slab->inuse here is the upper limit. 5566 */ 5567 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 5568 int free = slab->objects - slab->inuse; 5569 5570 /* Do not reread slab->inuse */ 5571 barrier(); 5572 5573 /* We do not keep full slabs on the list */ 5574 BUG_ON(free <= 0); 5575 5576 if (free == slab->objects) { 5577 list_move(&slab->slab_list, &discard); 5578 slab_clear_node_partial(slab); 5579 n->nr_partial--; 5580 dec_slabs_node(s, node, slab->objects); 5581 } else if (free <= SHRINK_PROMOTE_MAX) 5582 list_move(&slab->slab_list, promote + free - 1); 5583 } 5584 5585 /* 5586 * Promote the slabs filled up most to the head of the 5587 * partial list. 5588 */ 5589 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 5590 list_splice(promote + i, &n->partial); 5591 5592 spin_unlock_irqrestore(&n->list_lock, flags); 5593 5594 /* Release empty slabs */ 5595 list_for_each_entry_safe(slab, t, &discard, slab_list) 5596 free_slab(s, slab); 5597 5598 if (node_nr_slabs(n)) 5599 ret = 1; 5600 } 5601 5602 return ret; 5603 } 5604 5605 int __kmem_cache_shrink(struct kmem_cache *s) 5606 { 5607 flush_all(s); 5608 return __kmem_cache_do_shrink(s); 5609 } 5610 5611 static int slab_mem_going_offline_callback(void *arg) 5612 { 5613 struct kmem_cache *s; 5614 5615 mutex_lock(&slab_mutex); 5616 list_for_each_entry(s, &slab_caches, list) { 5617 flush_all_cpus_locked(s); 5618 __kmem_cache_do_shrink(s); 5619 } 5620 mutex_unlock(&slab_mutex); 5621 5622 return 0; 5623 } 5624 5625 static void slab_mem_offline_callback(void *arg) 5626 { 5627 struct memory_notify *marg = arg; 5628 int offline_node; 5629 5630 offline_node = marg->status_change_nid_normal; 5631 5632 /* 5633 * If the node still has available memory. we need kmem_cache_node 5634 * for it yet. 5635 */ 5636 if (offline_node < 0) 5637 return; 5638 5639 mutex_lock(&slab_mutex); 5640 node_clear(offline_node, slab_nodes); 5641 /* 5642 * We no longer free kmem_cache_node structures here, as it would be 5643 * racy with all get_node() users, and infeasible to protect them with 5644 * slab_mutex. 5645 */ 5646 mutex_unlock(&slab_mutex); 5647 } 5648 5649 static int slab_mem_going_online_callback(void *arg) 5650 { 5651 struct kmem_cache_node *n; 5652 struct kmem_cache *s; 5653 struct memory_notify *marg = arg; 5654 int nid = marg->status_change_nid_normal; 5655 int ret = 0; 5656 5657 /* 5658 * If the node's memory is already available, then kmem_cache_node is 5659 * already created. Nothing to do. 5660 */ 5661 if (nid < 0) 5662 return 0; 5663 5664 /* 5665 * We are bringing a node online. No memory is available yet. We must 5666 * allocate a kmem_cache_node structure in order to bring the node 5667 * online. 5668 */ 5669 mutex_lock(&slab_mutex); 5670 list_for_each_entry(s, &slab_caches, list) { 5671 /* 5672 * The structure may already exist if the node was previously 5673 * onlined and offlined. 5674 */ 5675 if (get_node(s, nid)) 5676 continue; 5677 /* 5678 * XXX: kmem_cache_alloc_node will fallback to other nodes 5679 * since memory is not yet available from the node that 5680 * is brought up. 5681 */ 5682 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 5683 if (!n) { 5684 ret = -ENOMEM; 5685 goto out; 5686 } 5687 init_kmem_cache_node(n); 5688 s->node[nid] = n; 5689 } 5690 /* 5691 * Any cache created after this point will also have kmem_cache_node 5692 * initialized for the new node. 
5693 */ 5694 node_set(nid, slab_nodes); 5695 out: 5696 mutex_unlock(&slab_mutex); 5697 return ret; 5698 } 5699 5700 static int slab_memory_callback(struct notifier_block *self, 5701 unsigned long action, void *arg) 5702 { 5703 int ret = 0; 5704 5705 switch (action) { 5706 case MEM_GOING_ONLINE: 5707 ret = slab_mem_going_online_callback(arg); 5708 break; 5709 case MEM_GOING_OFFLINE: 5710 ret = slab_mem_going_offline_callback(arg); 5711 break; 5712 case MEM_OFFLINE: 5713 case MEM_CANCEL_ONLINE: 5714 slab_mem_offline_callback(arg); 5715 break; 5716 case MEM_ONLINE: 5717 case MEM_CANCEL_OFFLINE: 5718 break; 5719 } 5720 if (ret) 5721 ret = notifier_from_errno(ret); 5722 else 5723 ret = NOTIFY_OK; 5724 return ret; 5725 } 5726 5727 /******************************************************************** 5728 * Basic setup of slabs 5729 *******************************************************************/ 5730 5731 /* 5732 * Used for early kmem_cache structures that were allocated using 5733 * the page allocator. Allocate them properly then fix up the pointers 5734 * that may be pointing to the wrong kmem_cache structure. 5735 */ 5736 5737 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 5738 { 5739 int node; 5740 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 5741 struct kmem_cache_node *n; 5742 5743 memcpy(s, static_cache, kmem_cache->object_size); 5744 5745 /* 5746 * This runs very early, and only the boot processor is supposed to be 5747 * up. Even if it weren't true, IRQs are not up so we couldn't fire 5748 * IPIs around. 5749 */ 5750 __flush_cpu_slab(s, smp_processor_id()); 5751 for_each_kmem_cache_node(s, node, n) { 5752 struct slab *p; 5753 5754 list_for_each_entry(p, &n->partial, slab_list) 5755 p->slab_cache = s; 5756 5757 #ifdef CONFIG_SLUB_DEBUG 5758 list_for_each_entry(p, &n->full, slab_list) 5759 p->slab_cache = s; 5760 #endif 5761 } 5762 list_add(&s->list, &slab_caches); 5763 return s; 5764 } 5765 5766 void __init kmem_cache_init(void) 5767 { 5768 static __initdata struct kmem_cache boot_kmem_cache, 5769 boot_kmem_cache_node; 5770 int node; 5771 5772 if (debug_guardpage_minorder()) 5773 slub_max_order = 0; 5774 5775 /* Print slub debugging pointers without hashing */ 5776 if (__slub_debug_enabled()) 5777 no_hash_pointers_enable(NULL); 5778 5779 kmem_cache_node = &boot_kmem_cache_node; 5780 kmem_cache = &boot_kmem_cache; 5781 5782 /* 5783 * Initialize the nodemask for which we will allocate per node 5784 * structures. Here we don't need taking slab_mutex yet. 
5785 */ 5786 for_each_node_state(node, N_NORMAL_MEMORY) 5787 node_set(node, slab_nodes); 5788 5789 create_boot_cache(kmem_cache_node, "kmem_cache_node", 5790 sizeof(struct kmem_cache_node), 5791 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 5792 5793 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 5794 5795 /* Able to allocate the per node structures */ 5796 slab_state = PARTIAL; 5797 5798 create_boot_cache(kmem_cache, "kmem_cache", 5799 offsetof(struct kmem_cache, node) + 5800 nr_node_ids * sizeof(struct kmem_cache_node *), 5801 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 5802 5803 kmem_cache = bootstrap(&boot_kmem_cache); 5804 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 5805 5806 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 5807 setup_kmalloc_cache_index_table(); 5808 create_kmalloc_caches(); 5809 5810 /* Setup random freelists for each cache */ 5811 init_freelist_randomization(); 5812 5813 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 5814 slub_cpu_dead); 5815 5816 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 5817 cache_line_size(), 5818 slub_min_order, slub_max_order, slub_min_objects, 5819 nr_cpu_ids, nr_node_ids); 5820 } 5821 5822 void __init kmem_cache_init_late(void) 5823 { 5824 #ifndef CONFIG_SLUB_TINY 5825 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); 5826 WARN_ON(!flushwq); 5827 #endif 5828 } 5829 5830 struct kmem_cache * 5831 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 5832 slab_flags_t flags, void (*ctor)(void *)) 5833 { 5834 struct kmem_cache *s; 5835 5836 s = find_mergeable(size, align, flags, name, ctor); 5837 if (s) { 5838 if (sysfs_slab_alias(s, name)) 5839 return NULL; 5840 5841 s->refcount++; 5842 5843 /* 5844 * Adjust the object sizes so that we clear 5845 * the complete object on kzalloc. 5846 */ 5847 s->object_size = max(s->object_size, size); 5848 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 5849 } 5850 5851 return s; 5852 } 5853 5854 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 5855 { 5856 int err; 5857 5858 err = kmem_cache_open(s, flags); 5859 if (err) 5860 return err; 5861 5862 /* Mutex is not taken during early boot */ 5863 if (slab_state <= UP) 5864 return 0; 5865 5866 err = sysfs_slab_add(s); 5867 if (err) { 5868 __kmem_cache_release(s); 5869 return err; 5870 } 5871 5872 if (s->flags & SLAB_STORE_USER) 5873 debugfs_slab_add(s); 5874 5875 return 0; 5876 } 5877 5878 #ifdef SLAB_SUPPORTS_SYSFS 5879 static int count_inuse(struct slab *slab) 5880 { 5881 return slab->inuse; 5882 } 5883 5884 static int count_total(struct slab *slab) 5885 { 5886 return slab->objects; 5887 } 5888 #endif 5889 5890 #ifdef CONFIG_SLUB_DEBUG 5891 static void validate_slab(struct kmem_cache *s, struct slab *slab, 5892 unsigned long *obj_map) 5893 { 5894 void *p; 5895 void *addr = slab_address(slab); 5896 5897 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 5898 return; 5899 5900 /* Now we know that a valid freelist exists */ 5901 __fill_map(obj_map, s, slab); 5902 for_each_object(p, s, addr, slab->objects) { 5903 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 
5904 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 5905 5906 if (!check_object(s, slab, p, val)) 5907 break; 5908 } 5909 } 5910 5911 static int validate_slab_node(struct kmem_cache *s, 5912 struct kmem_cache_node *n, unsigned long *obj_map) 5913 { 5914 unsigned long count = 0; 5915 struct slab *slab; 5916 unsigned long flags; 5917 5918 spin_lock_irqsave(&n->list_lock, flags); 5919 5920 list_for_each_entry(slab, &n->partial, slab_list) { 5921 validate_slab(s, slab, obj_map); 5922 count++; 5923 } 5924 if (count != n->nr_partial) { 5925 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 5926 s->name, count, n->nr_partial); 5927 slab_add_kunit_errors(); 5928 } 5929 5930 if (!(s->flags & SLAB_STORE_USER)) 5931 goto out; 5932 5933 list_for_each_entry(slab, &n->full, slab_list) { 5934 validate_slab(s, slab, obj_map); 5935 count++; 5936 } 5937 if (count != node_nr_slabs(n)) { 5938 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 5939 s->name, count, node_nr_slabs(n)); 5940 slab_add_kunit_errors(); 5941 } 5942 5943 out: 5944 spin_unlock_irqrestore(&n->list_lock, flags); 5945 return count; 5946 } 5947 5948 long validate_slab_cache(struct kmem_cache *s) 5949 { 5950 int node; 5951 unsigned long count = 0; 5952 struct kmem_cache_node *n; 5953 unsigned long *obj_map; 5954 5955 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 5956 if (!obj_map) 5957 return -ENOMEM; 5958 5959 flush_all(s); 5960 for_each_kmem_cache_node(s, node, n) 5961 count += validate_slab_node(s, n, obj_map); 5962 5963 bitmap_free(obj_map); 5964 5965 return count; 5966 } 5967 EXPORT_SYMBOL(validate_slab_cache); 5968 5969 #ifdef CONFIG_DEBUG_FS 5970 /* 5971 * Generate lists of code addresses where slabcache objects are allocated 5972 * and freed. 5973 */ 5974 5975 struct location { 5976 depot_stack_handle_t handle; 5977 unsigned long count; 5978 unsigned long addr; 5979 unsigned long waste; 5980 long long sum_time; 5981 long min_time; 5982 long max_time; 5983 long min_pid; 5984 long max_pid; 5985 DECLARE_BITMAP(cpus, NR_CPUS); 5986 nodemask_t nodes; 5987 }; 5988 5989 struct loc_track { 5990 unsigned long max; 5991 unsigned long count; 5992 struct location *loc; 5993 loff_t idx; 5994 }; 5995 5996 static struct dentry *slab_debugfs_root; 5997 5998 static void free_loc_track(struct loc_track *t) 5999 { 6000 if (t->max) 6001 free_pages((unsigned long)t->loc, 6002 get_order(sizeof(struct location) * t->max)); 6003 } 6004 6005 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 6006 { 6007 struct location *l; 6008 int order; 6009 6010 order = get_order(sizeof(struct location) * max); 6011 6012 l = (void *)__get_free_pages(flags, order); 6013 if (!l) 6014 return 0; 6015 6016 if (t->count) { 6017 memcpy(l, t->loc, sizeof(struct location) * t->count); 6018 free_loc_track(t); 6019 } 6020 t->max = max; 6021 t->loc = l; 6022 return 1; 6023 } 6024 6025 static int add_location(struct loc_track *t, struct kmem_cache *s, 6026 const struct track *track, 6027 unsigned int orig_size) 6028 { 6029 long start, end, pos; 6030 struct location *l; 6031 unsigned long caddr, chandle, cwaste; 6032 unsigned long age = jiffies - track->when; 6033 depot_stack_handle_t handle = 0; 6034 unsigned int waste = s->object_size - orig_size; 6035 6036 #ifdef CONFIG_STACKDEPOT 6037 handle = READ_ONCE(track->handle); 6038 #endif 6039 start = -1; 6040 end = t->count; 6041 6042 for ( ; ; ) { 6043 pos = start + (end - start + 1) / 2; 6044 6045 /* 6046 * There is nothing at "end". 
If we end up there 6047 * we need to add something to before end. 6048 */ 6049 if (pos == end) 6050 break; 6051 6052 l = &t->loc[pos]; 6053 caddr = l->addr; 6054 chandle = l->handle; 6055 cwaste = l->waste; 6056 if ((track->addr == caddr) && (handle == chandle) && 6057 (waste == cwaste)) { 6058 6059 l->count++; 6060 if (track->when) { 6061 l->sum_time += age; 6062 if (age < l->min_time) 6063 l->min_time = age; 6064 if (age > l->max_time) 6065 l->max_time = age; 6066 6067 if (track->pid < l->min_pid) 6068 l->min_pid = track->pid; 6069 if (track->pid > l->max_pid) 6070 l->max_pid = track->pid; 6071 6072 cpumask_set_cpu(track->cpu, 6073 to_cpumask(l->cpus)); 6074 } 6075 node_set(page_to_nid(virt_to_page(track)), l->nodes); 6076 return 1; 6077 } 6078 6079 if (track->addr < caddr) 6080 end = pos; 6081 else if (track->addr == caddr && handle < chandle) 6082 end = pos; 6083 else if (track->addr == caddr && handle == chandle && 6084 waste < cwaste) 6085 end = pos; 6086 else 6087 start = pos; 6088 } 6089 6090 /* 6091 * Not found. Insert new tracking element. 6092 */ 6093 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 6094 return 0; 6095 6096 l = t->loc + pos; 6097 if (pos < t->count) 6098 memmove(l + 1, l, 6099 (t->count - pos) * sizeof(struct location)); 6100 t->count++; 6101 l->count = 1; 6102 l->addr = track->addr; 6103 l->sum_time = age; 6104 l->min_time = age; 6105 l->max_time = age; 6106 l->min_pid = track->pid; 6107 l->max_pid = track->pid; 6108 l->handle = handle; 6109 l->waste = waste; 6110 cpumask_clear(to_cpumask(l->cpus)); 6111 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 6112 nodes_clear(l->nodes); 6113 node_set(page_to_nid(virt_to_page(track)), l->nodes); 6114 return 1; 6115 } 6116 6117 static void process_slab(struct loc_track *t, struct kmem_cache *s, 6118 struct slab *slab, enum track_item alloc, 6119 unsigned long *obj_map) 6120 { 6121 void *addr = slab_address(slab); 6122 bool is_alloc = (alloc == TRACK_ALLOC); 6123 void *p; 6124 6125 __fill_map(obj_map, s, slab); 6126 6127 for_each_object(p, s, addr, slab->objects) 6128 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 6129 add_location(t, s, get_track(s, p, alloc), 6130 is_alloc ? 
get_orig_size(s, p) : 6131 s->object_size); 6132 } 6133 #endif /* CONFIG_DEBUG_FS */ 6134 #endif /* CONFIG_SLUB_DEBUG */ 6135 6136 #ifdef SLAB_SUPPORTS_SYSFS 6137 enum slab_stat_type { 6138 SL_ALL, /* All slabs */ 6139 SL_PARTIAL, /* Only partially allocated slabs */ 6140 SL_CPU, /* Only slabs used for cpu caches */ 6141 SL_OBJECTS, /* Determine allocated objects not slabs */ 6142 SL_TOTAL /* Determine object capacity not slabs */ 6143 }; 6144 6145 #define SO_ALL (1 << SL_ALL) 6146 #define SO_PARTIAL (1 << SL_PARTIAL) 6147 #define SO_CPU (1 << SL_CPU) 6148 #define SO_OBJECTS (1 << SL_OBJECTS) 6149 #define SO_TOTAL (1 << SL_TOTAL) 6150 6151 static ssize_t show_slab_objects(struct kmem_cache *s, 6152 char *buf, unsigned long flags) 6153 { 6154 unsigned long total = 0; 6155 int node; 6156 int x; 6157 unsigned long *nodes; 6158 int len = 0; 6159 6160 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 6161 if (!nodes) 6162 return -ENOMEM; 6163 6164 if (flags & SO_CPU) { 6165 int cpu; 6166 6167 for_each_possible_cpu(cpu) { 6168 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 6169 cpu); 6170 int node; 6171 struct slab *slab; 6172 6173 slab = READ_ONCE(c->slab); 6174 if (!slab) 6175 continue; 6176 6177 node = slab_nid(slab); 6178 if (flags & SO_TOTAL) 6179 x = slab->objects; 6180 else if (flags & SO_OBJECTS) 6181 x = slab->inuse; 6182 else 6183 x = 1; 6184 6185 total += x; 6186 nodes[node] += x; 6187 6188 #ifdef CONFIG_SLUB_CPU_PARTIAL 6189 slab = slub_percpu_partial_read_once(c); 6190 if (slab) { 6191 node = slab_nid(slab); 6192 if (flags & SO_TOTAL) 6193 WARN_ON_ONCE(1); 6194 else if (flags & SO_OBJECTS) 6195 WARN_ON_ONCE(1); 6196 else 6197 x = slab->slabs; 6198 total += x; 6199 nodes[node] += x; 6200 } 6201 #endif 6202 } 6203 } 6204 6205 /* 6206 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 6207 * already held which will conflict with an existing lock order: 6208 * 6209 * mem_hotplug_lock->slab_mutex->kernfs_mutex 6210 * 6211 * We don't really need mem_hotplug_lock (to hold off 6212 * slab_mem_going_offline_callback) here because slab's memory hot 6213 * unplug code doesn't destroy the kmem_cache->node[] data. 
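	 * At worst a racing hot-remove leaves the per-node counters read
	 * below slightly stale, which is tolerable for these sysfs-only
	 * statistics.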
6214 */ 6215 6216 #ifdef CONFIG_SLUB_DEBUG 6217 if (flags & SO_ALL) { 6218 struct kmem_cache_node *n; 6219 6220 for_each_kmem_cache_node(s, node, n) { 6221 6222 if (flags & SO_TOTAL) 6223 x = node_nr_objs(n); 6224 else if (flags & SO_OBJECTS) 6225 x = node_nr_objs(n) - count_partial(n, count_free); 6226 else 6227 x = node_nr_slabs(n); 6228 total += x; 6229 nodes[node] += x; 6230 } 6231 6232 } else 6233 #endif 6234 if (flags & SO_PARTIAL) { 6235 struct kmem_cache_node *n; 6236 6237 for_each_kmem_cache_node(s, node, n) { 6238 if (flags & SO_TOTAL) 6239 x = count_partial(n, count_total); 6240 else if (flags & SO_OBJECTS) 6241 x = count_partial(n, count_inuse); 6242 else 6243 x = n->nr_partial; 6244 total += x; 6245 nodes[node] += x; 6246 } 6247 } 6248 6249 len += sysfs_emit_at(buf, len, "%lu", total); 6250 #ifdef CONFIG_NUMA 6251 for (node = 0; node < nr_node_ids; node++) { 6252 if (nodes[node]) 6253 len += sysfs_emit_at(buf, len, " N%d=%lu", 6254 node, nodes[node]); 6255 } 6256 #endif 6257 len += sysfs_emit_at(buf, len, "\n"); 6258 kfree(nodes); 6259 6260 return len; 6261 } 6262 6263 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 6264 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 6265 6266 struct slab_attribute { 6267 struct attribute attr; 6268 ssize_t (*show)(struct kmem_cache *s, char *buf); 6269 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 6270 }; 6271 6272 #define SLAB_ATTR_RO(_name) \ 6273 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 6274 6275 #define SLAB_ATTR(_name) \ 6276 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 6277 6278 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 6279 { 6280 return sysfs_emit(buf, "%u\n", s->size); 6281 } 6282 SLAB_ATTR_RO(slab_size); 6283 6284 static ssize_t align_show(struct kmem_cache *s, char *buf) 6285 { 6286 return sysfs_emit(buf, "%u\n", s->align); 6287 } 6288 SLAB_ATTR_RO(align); 6289 6290 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 6291 { 6292 return sysfs_emit(buf, "%u\n", s->object_size); 6293 } 6294 SLAB_ATTR_RO(object_size); 6295 6296 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 6297 { 6298 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 6299 } 6300 SLAB_ATTR_RO(objs_per_slab); 6301 6302 static ssize_t order_show(struct kmem_cache *s, char *buf) 6303 { 6304 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 6305 } 6306 SLAB_ATTR_RO(order); 6307 6308 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 6309 { 6310 return sysfs_emit(buf, "%lu\n", s->min_partial); 6311 } 6312 6313 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 6314 size_t length) 6315 { 6316 unsigned long min; 6317 int err; 6318 6319 err = kstrtoul(buf, 10, &min); 6320 if (err) 6321 return err; 6322 6323 s->min_partial = min; 6324 return length; 6325 } 6326 SLAB_ATTR(min_partial); 6327 6328 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 6329 { 6330 unsigned int nr_partial = 0; 6331 #ifdef CONFIG_SLUB_CPU_PARTIAL 6332 nr_partial = s->cpu_partial; 6333 #endif 6334 6335 return sysfs_emit(buf, "%u\n", nr_partial); 6336 } 6337 6338 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 6339 size_t length) 6340 { 6341 unsigned int objects; 6342 int err; 6343 6344 err = kstrtouint(buf, 10, &objects); 6345 if (err) 6346 return err; 6347 if (objects && !kmem_cache_has_cpu_partial(s)) 6348 return -EINVAL; 6349 6350 slub_set_cpu_partial(s, objects); 
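	/*
	 * Drain the per-cpu (partial) slabs cached under the old limit;
	 * subsequent frees will refill the percpu partial lists according
	 * to the value that was just set.
	 */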
6351 flush_all(s); 6352 return length; 6353 } 6354 SLAB_ATTR(cpu_partial); 6355 6356 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 6357 { 6358 if (!s->ctor) 6359 return 0; 6360 return sysfs_emit(buf, "%pS\n", s->ctor); 6361 } 6362 SLAB_ATTR_RO(ctor); 6363 6364 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 6365 { 6366 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 6367 } 6368 SLAB_ATTR_RO(aliases); 6369 6370 static ssize_t partial_show(struct kmem_cache *s, char *buf) 6371 { 6372 return show_slab_objects(s, buf, SO_PARTIAL); 6373 } 6374 SLAB_ATTR_RO(partial); 6375 6376 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 6377 { 6378 return show_slab_objects(s, buf, SO_CPU); 6379 } 6380 SLAB_ATTR_RO(cpu_slabs); 6381 6382 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 6383 { 6384 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 6385 } 6386 SLAB_ATTR_RO(objects_partial); 6387 6388 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 6389 { 6390 int objects = 0; 6391 int slabs = 0; 6392 int cpu __maybe_unused; 6393 int len = 0; 6394 6395 #ifdef CONFIG_SLUB_CPU_PARTIAL 6396 for_each_online_cpu(cpu) { 6397 struct slab *slab; 6398 6399 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6400 6401 if (slab) 6402 slabs += slab->slabs; 6403 } 6404 #endif 6405 6406 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 6407 objects = (slabs * oo_objects(s->oo)) / 2; 6408 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 6409 6410 #ifdef CONFIG_SLUB_CPU_PARTIAL 6411 for_each_online_cpu(cpu) { 6412 struct slab *slab; 6413 6414 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6415 if (slab) { 6416 slabs = READ_ONCE(slab->slabs); 6417 objects = (slabs * oo_objects(s->oo)) / 2; 6418 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 6419 cpu, objects, slabs); 6420 } 6421 } 6422 #endif 6423 len += sysfs_emit_at(buf, len, "\n"); 6424 6425 return len; 6426 } 6427 SLAB_ATTR_RO(slabs_cpu_partial); 6428 6429 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 6430 { 6431 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 6432 } 6433 SLAB_ATTR_RO(reclaim_account); 6434 6435 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 6436 { 6437 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 6438 } 6439 SLAB_ATTR_RO(hwcache_align); 6440 6441 #ifdef CONFIG_ZONE_DMA 6442 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 6443 { 6444 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 6445 } 6446 SLAB_ATTR_RO(cache_dma); 6447 #endif 6448 6449 #ifdef CONFIG_HARDENED_USERCOPY 6450 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 6451 { 6452 return sysfs_emit(buf, "%u\n", s->usersize); 6453 } 6454 SLAB_ATTR_RO(usersize); 6455 #endif 6456 6457 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 6458 { 6459 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 6460 } 6461 SLAB_ATTR_RO(destroy_by_rcu); 6462 6463 #ifdef CONFIG_SLUB_DEBUG 6464 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 6465 { 6466 return show_slab_objects(s, buf, SO_ALL); 6467 } 6468 SLAB_ATTR_RO(slabs); 6469 6470 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 6471 { 6472 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 6473 } 6474 SLAB_ATTR_RO(total_objects); 6475 6476 static ssize_t objects_show(struct kmem_cache *s, char *buf) 6477 { 6478 return 
show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 6479 } 6480 SLAB_ATTR_RO(objects); 6481 6482 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 6483 { 6484 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 6485 } 6486 SLAB_ATTR_RO(sanity_checks); 6487 6488 static ssize_t trace_show(struct kmem_cache *s, char *buf) 6489 { 6490 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 6491 } 6492 SLAB_ATTR_RO(trace); 6493 6494 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 6495 { 6496 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 6497 } 6498 6499 SLAB_ATTR_RO(red_zone); 6500 6501 static ssize_t poison_show(struct kmem_cache *s, char *buf) 6502 { 6503 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 6504 } 6505 6506 SLAB_ATTR_RO(poison); 6507 6508 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 6509 { 6510 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 6511 } 6512 6513 SLAB_ATTR_RO(store_user); 6514 6515 static ssize_t validate_show(struct kmem_cache *s, char *buf) 6516 { 6517 return 0; 6518 } 6519 6520 static ssize_t validate_store(struct kmem_cache *s, 6521 const char *buf, size_t length) 6522 { 6523 int ret = -EINVAL; 6524 6525 if (buf[0] == '1' && kmem_cache_debug(s)) { 6526 ret = validate_slab_cache(s); 6527 if (ret >= 0) 6528 ret = length; 6529 } 6530 return ret; 6531 } 6532 SLAB_ATTR(validate); 6533 6534 #endif /* CONFIG_SLUB_DEBUG */ 6535 6536 #ifdef CONFIG_FAILSLAB 6537 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 6538 { 6539 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 6540 } 6541 6542 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 6543 size_t length) 6544 { 6545 if (s->refcount > 1) 6546 return -EINVAL; 6547 6548 if (buf[0] == '1') 6549 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); 6550 else 6551 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); 6552 6553 return length; 6554 } 6555 SLAB_ATTR(failslab); 6556 #endif 6557 6558 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 6559 { 6560 return 0; 6561 } 6562 6563 static ssize_t shrink_store(struct kmem_cache *s, 6564 const char *buf, size_t length) 6565 { 6566 if (buf[0] == '1') 6567 kmem_cache_shrink(s); 6568 else 6569 return -EINVAL; 6570 return length; 6571 } 6572 SLAB_ATTR(shrink); 6573 6574 #ifdef CONFIG_NUMA 6575 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 6576 { 6577 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 6578 } 6579 6580 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 6581 const char *buf, size_t length) 6582 { 6583 unsigned int ratio; 6584 int err; 6585 6586 err = kstrtouint(buf, 10, &ratio); 6587 if (err) 6588 return err; 6589 if (ratio > 100) 6590 return -ERANGE; 6591 6592 s->remote_node_defrag_ratio = ratio * 10; 6593 6594 return length; 6595 } 6596 SLAB_ATTR(remote_node_defrag_ratio); 6597 #endif 6598 6599 #ifdef CONFIG_SLUB_STATS 6600 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 6601 { 6602 unsigned long sum = 0; 6603 int cpu; 6604 int len = 0; 6605 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 6606 6607 if (!data) 6608 return -ENOMEM; 6609 6610 for_each_online_cpu(cpu) { 6611 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 6612 6613 data[cpu] = x; 6614 sum += x; 6615 } 6616 6617 len += sysfs_emit_at(buf, len, "%lu", sum); 6618 6619 #ifdef CONFIG_SMP 6620 for_each_online_cpu(cpu) { 6621 if (data[cpu]) 6622 len += 
sysfs_emit_at(buf, len, " C%d=%u", 6623 cpu, data[cpu]); 6624 } 6625 #endif 6626 kfree(data); 6627 len += sysfs_emit_at(buf, len, "\n"); 6628 6629 return len; 6630 } 6631 6632 static void clear_stat(struct kmem_cache *s, enum stat_item si) 6633 { 6634 int cpu; 6635 6636 for_each_online_cpu(cpu) 6637 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 6638 } 6639 6640 #define STAT_ATTR(si, text) \ 6641 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 6642 { \ 6643 return show_stat(s, buf, si); \ 6644 } \ 6645 static ssize_t text##_store(struct kmem_cache *s, \ 6646 const char *buf, size_t length) \ 6647 { \ 6648 if (buf[0] != '0') \ 6649 return -EINVAL; \ 6650 clear_stat(s, si); \ 6651 return length; \ 6652 } \ 6653 SLAB_ATTR(text); \ 6654 6655 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 6656 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 6657 STAT_ATTR(FREE_FASTPATH, free_fastpath); 6658 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 6659 STAT_ATTR(FREE_FROZEN, free_frozen); 6660 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 6661 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 6662 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 6663 STAT_ATTR(ALLOC_SLAB, alloc_slab); 6664 STAT_ATTR(ALLOC_REFILL, alloc_refill); 6665 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 6666 STAT_ATTR(FREE_SLAB, free_slab); 6667 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 6668 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 6669 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 6670 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 6671 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 6672 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 6673 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 6674 STAT_ATTR(ORDER_FALLBACK, order_fallback); 6675 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 6676 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 6677 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 6678 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 6679 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 6680 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 6681 #endif /* CONFIG_SLUB_STATS */ 6682 6683 #ifdef CONFIG_KFENCE 6684 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) 6685 { 6686 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); 6687 } 6688 6689 static ssize_t skip_kfence_store(struct kmem_cache *s, 6690 const char *buf, size_t length) 6691 { 6692 int ret = length; 6693 6694 if (buf[0] == '0') 6695 s->flags &= ~SLAB_SKIP_KFENCE; 6696 else if (buf[0] == '1') 6697 s->flags |= SLAB_SKIP_KFENCE; 6698 else 6699 ret = -EINVAL; 6700 6701 return ret; 6702 } 6703 SLAB_ATTR(skip_kfence); 6704 #endif 6705 6706 static struct attribute *slab_attrs[] = { 6707 &slab_size_attr.attr, 6708 &object_size_attr.attr, 6709 &objs_per_slab_attr.attr, 6710 &order_attr.attr, 6711 &min_partial_attr.attr, 6712 &cpu_partial_attr.attr, 6713 &objects_partial_attr.attr, 6714 &partial_attr.attr, 6715 &cpu_slabs_attr.attr, 6716 &ctor_attr.attr, 6717 &aliases_attr.attr, 6718 &align_attr.attr, 6719 &hwcache_align_attr.attr, 6720 &reclaim_account_attr.attr, 6721 &destroy_by_rcu_attr.attr, 6722 &shrink_attr.attr, 6723 &slabs_cpu_partial_attr.attr, 6724 #ifdef CONFIG_SLUB_DEBUG 6725 &total_objects_attr.attr, 6726 &objects_attr.attr, 6727 &slabs_attr.attr, 6728 &sanity_checks_attr.attr, 6729 &trace_attr.attr, 6730 &red_zone_attr.attr, 6731 &poison_attr.attr, 6732 &store_user_attr.attr, 6733 &validate_attr.attr, 6734 #endif 6735 #ifdef CONFIG_ZONE_DMA 6736 &cache_dma_attr.attr, 6737 #endif 
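	/*
	 * The remaining entries are conditional on their config options;
	 * the list stays NULL terminated either way.
	 */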
6738 #ifdef CONFIG_NUMA 6739 &remote_node_defrag_ratio_attr.attr, 6740 #endif 6741 #ifdef CONFIG_SLUB_STATS 6742 &alloc_fastpath_attr.attr, 6743 &alloc_slowpath_attr.attr, 6744 &free_fastpath_attr.attr, 6745 &free_slowpath_attr.attr, 6746 &free_frozen_attr.attr, 6747 &free_add_partial_attr.attr, 6748 &free_remove_partial_attr.attr, 6749 &alloc_from_partial_attr.attr, 6750 &alloc_slab_attr.attr, 6751 &alloc_refill_attr.attr, 6752 &alloc_node_mismatch_attr.attr, 6753 &free_slab_attr.attr, 6754 &cpuslab_flush_attr.attr, 6755 &deactivate_full_attr.attr, 6756 &deactivate_empty_attr.attr, 6757 &deactivate_to_head_attr.attr, 6758 &deactivate_to_tail_attr.attr, 6759 &deactivate_remote_frees_attr.attr, 6760 &deactivate_bypass_attr.attr, 6761 &order_fallback_attr.attr, 6762 &cmpxchg_double_fail_attr.attr, 6763 &cmpxchg_double_cpu_fail_attr.attr, 6764 &cpu_partial_alloc_attr.attr, 6765 &cpu_partial_free_attr.attr, 6766 &cpu_partial_node_attr.attr, 6767 &cpu_partial_drain_attr.attr, 6768 #endif 6769 #ifdef CONFIG_FAILSLAB 6770 &failslab_attr.attr, 6771 #endif 6772 #ifdef CONFIG_HARDENED_USERCOPY 6773 &usersize_attr.attr, 6774 #endif 6775 #ifdef CONFIG_KFENCE 6776 &skip_kfence_attr.attr, 6777 #endif 6778 6779 NULL 6780 }; 6781 6782 static const struct attribute_group slab_attr_group = { 6783 .attrs = slab_attrs, 6784 }; 6785 6786 static ssize_t slab_attr_show(struct kobject *kobj, 6787 struct attribute *attr, 6788 char *buf) 6789 { 6790 struct slab_attribute *attribute; 6791 struct kmem_cache *s; 6792 6793 attribute = to_slab_attr(attr); 6794 s = to_slab(kobj); 6795 6796 if (!attribute->show) 6797 return -EIO; 6798 6799 return attribute->show(s, buf); 6800 } 6801 6802 static ssize_t slab_attr_store(struct kobject *kobj, 6803 struct attribute *attr, 6804 const char *buf, size_t len) 6805 { 6806 struct slab_attribute *attribute; 6807 struct kmem_cache *s; 6808 6809 attribute = to_slab_attr(attr); 6810 s = to_slab(kobj); 6811 6812 if (!attribute->store) 6813 return -EIO; 6814 6815 return attribute->store(s, buf, len); 6816 } 6817 6818 static void kmem_cache_release(struct kobject *k) 6819 { 6820 slab_kmem_cache_release(to_slab(k)); 6821 } 6822 6823 static const struct sysfs_ops slab_sysfs_ops = { 6824 .show = slab_attr_show, 6825 .store = slab_attr_store, 6826 }; 6827 6828 static const struct kobj_type slab_ktype = { 6829 .sysfs_ops = &slab_sysfs_ops, 6830 .release = kmem_cache_release, 6831 }; 6832 6833 static struct kset *slab_kset; 6834 6835 static inline struct kset *cache_kset(struct kmem_cache *s) 6836 { 6837 return slab_kset; 6838 } 6839 6840 #define ID_STR_LENGTH 32 6841 6842 /* Create a unique string id for a slab cache: 6843 * 6844 * Format :[flags-]size 6845 */ 6846 static char *create_unique_id(struct kmem_cache *s) 6847 { 6848 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 6849 char *p = name; 6850 6851 if (!name) 6852 return ERR_PTR(-ENOMEM); 6853 6854 *p++ = ':'; 6855 /* 6856 * First flags affecting slabcache operations. We will only 6857 * get here for aliasable slabs so we do not need to support 6858 * too many flags. The flags here must cover all flags that 6859 * are matched during merging to guarantee that the id is 6860 * unique. 
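	 * The characters emitted below are 'd' (SLAB_CACHE_DMA),
	 * 'D' (SLAB_CACHE_DMA32), 'a' (SLAB_RECLAIM_ACCOUNT),
	 * 'F' (SLAB_CONSISTENCY_CHECKS) and 'A' (SLAB_ACCOUNT), followed
	 * by the cache's size in decimal.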
6861 */ 6862 if (s->flags & SLAB_CACHE_DMA) 6863 *p++ = 'd'; 6864 if (s->flags & SLAB_CACHE_DMA32) 6865 *p++ = 'D'; 6866 if (s->flags & SLAB_RECLAIM_ACCOUNT) 6867 *p++ = 'a'; 6868 if (s->flags & SLAB_CONSISTENCY_CHECKS) 6869 *p++ = 'F'; 6870 if (s->flags & SLAB_ACCOUNT) 6871 *p++ = 'A'; 6872 if (p != name + 1) 6873 *p++ = '-'; 6874 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); 6875 6876 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { 6877 kfree(name); 6878 return ERR_PTR(-EINVAL); 6879 } 6880 kmsan_unpoison_memory(name, p - name); 6881 return name; 6882 } 6883 6884 static int sysfs_slab_add(struct kmem_cache *s) 6885 { 6886 int err; 6887 const char *name; 6888 struct kset *kset = cache_kset(s); 6889 int unmergeable = slab_unmergeable(s); 6890 6891 if (!unmergeable && disable_higher_order_debug && 6892 (slub_debug & DEBUG_METADATA_FLAGS)) 6893 unmergeable = 1; 6894 6895 if (unmergeable) { 6896 /* 6897 * Slabcache can never be merged so we can use the name proper. 6898 * This is typically the case for debug situations. In that 6899 * case we can catch duplicate names easily. 6900 */ 6901 sysfs_remove_link(&slab_kset->kobj, s->name); 6902 name = s->name; 6903 } else { 6904 /* 6905 * Create a unique name for the slab as a target 6906 * for the symlinks. 6907 */ 6908 name = create_unique_id(s); 6909 if (IS_ERR(name)) 6910 return PTR_ERR(name); 6911 } 6912 6913 s->kobj.kset = kset; 6914 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 6915 if (err) 6916 goto out; 6917 6918 err = sysfs_create_group(&s->kobj, &slab_attr_group); 6919 if (err) 6920 goto out_del_kobj; 6921 6922 if (!unmergeable) { 6923 /* Setup first alias */ 6924 sysfs_slab_alias(s, s->name); 6925 } 6926 out: 6927 if (!unmergeable) 6928 kfree(name); 6929 return err; 6930 out_del_kobj: 6931 kobject_del(&s->kobj); 6932 goto out; 6933 } 6934 6935 void sysfs_slab_unlink(struct kmem_cache *s) 6936 { 6937 kobject_del(&s->kobj); 6938 } 6939 6940 void sysfs_slab_release(struct kmem_cache *s) 6941 { 6942 kobject_put(&s->kobj); 6943 } 6944 6945 /* 6946 * Need to buffer aliases during bootup until sysfs becomes 6947 * available lest we lose that information. 6948 */ 6949 struct saved_alias { 6950 struct kmem_cache *s; 6951 const char *name; 6952 struct saved_alias *next; 6953 }; 6954 6955 static struct saved_alias *alias_list; 6956 6957 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 6958 { 6959 struct saved_alias *al; 6960 6961 if (slab_state == FULL) { 6962 /* 6963 * If we have a leftover link then remove it. 
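		 * Before sysfs is up (slab_state != FULL) the alias is instead
		 * queued on alias_list below and replayed later from
		 * slab_sysfs_init().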
6964 */ 6965 sysfs_remove_link(&slab_kset->kobj, name); 6966 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 6967 } 6968 6969 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 6970 if (!al) 6971 return -ENOMEM; 6972 6973 al->s = s; 6974 al->name = name; 6975 al->next = alias_list; 6976 alias_list = al; 6977 kmsan_unpoison_memory(al, sizeof(*al)); 6978 return 0; 6979 } 6980 6981 static int __init slab_sysfs_init(void) 6982 { 6983 struct kmem_cache *s; 6984 int err; 6985 6986 mutex_lock(&slab_mutex); 6987 6988 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 6989 if (!slab_kset) { 6990 mutex_unlock(&slab_mutex); 6991 pr_err("Cannot register slab subsystem.\n"); 6992 return -ENOMEM; 6993 } 6994 6995 slab_state = FULL; 6996 6997 list_for_each_entry(s, &slab_caches, list) { 6998 err = sysfs_slab_add(s); 6999 if (err) 7000 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 7001 s->name); 7002 } 7003 7004 while (alias_list) { 7005 struct saved_alias *al = alias_list; 7006 7007 alias_list = alias_list->next; 7008 err = sysfs_slab_alias(al->s, al->name); 7009 if (err) 7010 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 7011 al->name); 7012 kfree(al); 7013 } 7014 7015 mutex_unlock(&slab_mutex); 7016 return 0; 7017 } 7018 late_initcall(slab_sysfs_init); 7019 #endif /* SLAB_SUPPORTS_SYSFS */ 7020 7021 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 7022 static int slab_debugfs_show(struct seq_file *seq, void *v) 7023 { 7024 struct loc_track *t = seq->private; 7025 struct location *l; 7026 unsigned long idx; 7027 7028 idx = (unsigned long) t->idx; 7029 if (idx < t->count) { 7030 l = &t->loc[idx]; 7031 7032 seq_printf(seq, "%7ld ", l->count); 7033 7034 if (l->addr) 7035 seq_printf(seq, "%pS", (void *)l->addr); 7036 else 7037 seq_puts(seq, "<not-available>"); 7038 7039 if (l->waste) 7040 seq_printf(seq, " waste=%lu/%lu", 7041 l->count * l->waste, l->waste); 7042 7043 if (l->sum_time != l->min_time) { 7044 seq_printf(seq, " age=%ld/%llu/%ld", 7045 l->min_time, div_u64(l->sum_time, l->count), 7046 l->max_time); 7047 } else 7048 seq_printf(seq, " age=%ld", l->min_time); 7049 7050 if (l->min_pid != l->max_pid) 7051 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 7052 else 7053 seq_printf(seq, " pid=%ld", 7054 l->min_pid); 7055 7056 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 7057 seq_printf(seq, " cpus=%*pbl", 7058 cpumask_pr_args(to_cpumask(l->cpus))); 7059 7060 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 7061 seq_printf(seq, " nodes=%*pbl", 7062 nodemask_pr_args(&l->nodes)); 7063 7064 #ifdef CONFIG_STACKDEPOT 7065 { 7066 depot_stack_handle_t handle; 7067 unsigned long *entries; 7068 unsigned int nr_entries, j; 7069 7070 handle = READ_ONCE(l->handle); 7071 if (handle) { 7072 nr_entries = stack_depot_fetch(handle, &entries); 7073 seq_puts(seq, "\n"); 7074 for (j = 0; j < nr_entries; j++) 7075 seq_printf(seq, " %pS\n", (void *)entries[j]); 7076 } 7077 } 7078 #endif 7079 seq_puts(seq, "\n"); 7080 } 7081 7082 if (!idx && !t->count) 7083 seq_puts(seq, "No data\n"); 7084 7085 return 0; 7086 } 7087 7088 static void slab_debugfs_stop(struct seq_file *seq, void *v) 7089 { 7090 } 7091 7092 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 7093 { 7094 struct loc_track *t = seq->private; 7095 7096 t->idx = ++(*ppos); 7097 if (*ppos <= t->count) 7098 return ppos; 7099 7100 return NULL; 7101 } 7102 7103 static int cmp_loc_by_count(const void *a, const void *b, const void *data) 7104 { 7105 struct location 
*loc1 = (struct location *)a; 7106 struct location *loc2 = (struct location *)b; 7107 7108 if (loc1->count > loc2->count) 7109 return -1; 7110 else 7111 return 1; 7112 } 7113 7114 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 7115 { 7116 struct loc_track *t = seq->private; 7117 7118 t->idx = *ppos; 7119 return ppos; 7120 } 7121 7122 static const struct seq_operations slab_debugfs_sops = { 7123 .start = slab_debugfs_start, 7124 .next = slab_debugfs_next, 7125 .stop = slab_debugfs_stop, 7126 .show = slab_debugfs_show, 7127 }; 7128 7129 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 7130 { 7131 7132 struct kmem_cache_node *n; 7133 enum track_item alloc; 7134 int node; 7135 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 7136 sizeof(struct loc_track)); 7137 struct kmem_cache *s = file_inode(filep)->i_private; 7138 unsigned long *obj_map; 7139 7140 if (!t) 7141 return -ENOMEM; 7142 7143 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 7144 if (!obj_map) { 7145 seq_release_private(inode, filep); 7146 return -ENOMEM; 7147 } 7148 7149 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 7150 alloc = TRACK_ALLOC; 7151 else 7152 alloc = TRACK_FREE; 7153 7154 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 7155 bitmap_free(obj_map); 7156 seq_release_private(inode, filep); 7157 return -ENOMEM; 7158 } 7159 7160 for_each_kmem_cache_node(s, node, n) { 7161 unsigned long flags; 7162 struct slab *slab; 7163 7164 if (!node_nr_slabs(n)) 7165 continue; 7166 7167 spin_lock_irqsave(&n->list_lock, flags); 7168 list_for_each_entry(slab, &n->partial, slab_list) 7169 process_slab(t, s, slab, alloc, obj_map); 7170 list_for_each_entry(slab, &n->full, slab_list) 7171 process_slab(t, s, slab, alloc, obj_map); 7172 spin_unlock_irqrestore(&n->list_lock, flags); 7173 } 7174 7175 /* Sort locations by count */ 7176 sort_r(t->loc, t->count, sizeof(struct location), 7177 cmp_loc_by_count, NULL, NULL); 7178 7179 bitmap_free(obj_map); 7180 return 0; 7181 } 7182 7183 static int slab_debug_trace_release(struct inode *inode, struct file *file) 7184 { 7185 struct seq_file *seq = file->private_data; 7186 struct loc_track *t = seq->private; 7187 7188 free_loc_track(t); 7189 return seq_release_private(inode, file); 7190 } 7191 7192 static const struct file_operations slab_debugfs_fops = { 7193 .open = slab_debug_trace_open, 7194 .read = seq_read, 7195 .llseek = seq_lseek, 7196 .release = slab_debug_trace_release, 7197 }; 7198 7199 static void debugfs_slab_add(struct kmem_cache *s) 7200 { 7201 struct dentry *slab_cache_dir; 7202 7203 if (unlikely(!slab_debugfs_root)) 7204 return; 7205 7206 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 7207 7208 debugfs_create_file("alloc_traces", 0400, 7209 slab_cache_dir, s, &slab_debugfs_fops); 7210 7211 debugfs_create_file("free_traces", 0400, 7212 slab_cache_dir, s, &slab_debugfs_fops); 7213 } 7214 7215 void debugfs_slab_release(struct kmem_cache *s) 7216 { 7217 debugfs_lookup_and_remove(s->name, slab_debugfs_root); 7218 } 7219 7220 static int __init slab_debugfs_init(void) 7221 { 7222 struct kmem_cache *s; 7223 7224 slab_debugfs_root = debugfs_create_dir("slab", NULL); 7225 7226 list_for_each_entry(s, &slab_caches, list) 7227 if (s->flags & SLAB_STORE_USER) 7228 debugfs_slab_add(s); 7229 7230 return 0; 7231 7232 } 7233 __initcall(slab_debugfs_init); 7234 #endif 7235 /* 7236 * The /proc/slabinfo ABI 7237 */ 7238 #ifdef CONFIG_SLUB_DEBUG 7239 void 
get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 7240 { 7241 unsigned long nr_slabs = 0; 7242 unsigned long nr_objs = 0; 7243 unsigned long nr_free = 0; 7244 int node; 7245 struct kmem_cache_node *n; 7246 7247 for_each_kmem_cache_node(s, node, n) { 7248 nr_slabs += node_nr_slabs(n); 7249 nr_objs += node_nr_objs(n); 7250 nr_free += count_partial(n, count_free); 7251 } 7252 7253 sinfo->active_objs = nr_objs - nr_free; 7254 sinfo->num_objs = nr_objs; 7255 sinfo->active_slabs = nr_slabs; 7256 sinfo->num_slabs = nr_slabs; 7257 sinfo->objects_per_slab = oo_objects(s->oo); 7258 sinfo->cache_order = oo_order(s->oo); 7259 } 7260 7261 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) 7262 { 7263 } 7264 7265 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 7266 size_t count, loff_t *ppos) 7267 { 7268 return -EIO; 7269 } 7270 #endif /* CONFIG_SLUB_DEBUG */ 7271